diff --git a/README.md b/README.md
index 2b6688d..6a09372 100644
--- a/README.md
+++ b/README.md
@@ -1,9 +1,9 @@
 # Update
 1. Upgrade all the PYG libraries to the newest versions.
-2. Delete Python 3.7 support, because PyTorch 2.0 does not support it.
+2. ~~Delete Python 3.7 support, because PyTorch 2.0 does not support it.~~ Old update
 # How to use
-1. Be sure that you have installed ROCm 5.4.2 or newer versions. You can use ```rocm-smi``` to check it.
+1. Be sure that you have installed ROCm 5.7 or newer versions. You can use ```rocm-smi``` to check it.
 2. Go to Releases module, choose the versions of PYG you want and the right version of your Python environment.
 3. Download the zip file.
 4. ```unzip``` it, and ```cd ``` into the folder.
@@ -12,15 +12,15 @@
 # Build environment
 ```
 Ubuntu 22.04
-PyTorch 2.0
-ROCm 5.4.3
+PyTorch 2.2.0
+ROCm 6.0.2
 ```
 # Current version
 ```
-pytorch_geometric-2.3.0
-pytorch_scatter-2.1.1
-pytorch_sparse-0.6.17
-pytorch_cluster-1.6.1
+pytorch_geometric-2.4.0
+pytorch_scatter-2.1.2
+pytorch_sparse-0.6.18
+pytorch_cluster-1.6.3
 pytorch_spline_conv-1.2.2
 ```
\ No newline at end of file
diff --git a/pytorch_cluster-1.6.1/.github/workflows/building-conda.yml b/pytorch_cluster-1.6.1/.github/workflows/building-conda.yml
deleted file mode 100644
index 49b5d16..0000000
--- a/pytorch_cluster-1.6.1/.github/workflows/building-conda.yml
+++ /dev/null
@@ -1,85 +0,0 @@
-name: Building Conda
-
-on: [workflow_dispatch]
-
-jobs:
-
-  conda-build:
-    runs-on: ${{ matrix.os }}
-
-    strategy:
-      fail-fast: false
-      matrix:
-        # We have trouble building for Windows - drop for now.
-        os: [ubuntu-18.04, macos-10.15]  # windows-2019
-        python-version: ['3.7', '3.8', '3.9', '3.10']
-        torch-version: [1.13.0, 2.0.0]
-        # We have trouble building for `cu116` due to PyTorch 1.13.0 bugs
-        cuda-version: ['cpu', 'cu117', 'cu118']
-        exclude:
-          - torch-version: 2.0.0
-            python-version: '3.7'
-          - torch-version: 1.13.0
-            cuda-version: 'cu118'
-          - os: macos-10.15
-            cuda-version: 'cu117'
-          - os: macos-10.15
-            cuda-version: 'cu118'
-
-    steps:
-      - uses: actions/checkout@v2
-      - name: Set up Conda for Python ${{ matrix.python-version }}
-        uses: conda-incubator/setup-miniconda@v2
-        with:
-          python-version: ${{ matrix.python-version }}
-
-      - name: Free up disk space
-        if: ${{ runner.os == 'Linux' }}
-        run: |
-          sudo rm -rf /usr/share/dotnet
-
-      - name: Install Conda packages
-        run: |
-          conda install conda-build conda-verify --yes
-        shell:
-          bash -l {0}
-
-      - name: Install CUDA ${{ matrix.cuda-version }}
-        if: ${{ matrix.cuda-version != 'cpu' }}
-        run: |
-          bash .github/workflows/cuda/${{ matrix.cuda-version }}-${{ runner.os }}.sh
-        shell:
-          bash
-
-      - name: Build Conda package for CPU
-        if: ${{ matrix.cuda-version == 'cpu' }}
-        run: |
-          FORCE_CUDA=0 TORCH_CUDA_ARCH_LIST=0 ./conda/pytorch-cluster/build_conda.sh ${{ matrix.python-version }} ${{ matrix.torch-version }} ${{ matrix.cuda-version }}
-        shell:
-          bash -l {0}
-
-      - name: Build Conda package for GPU
-        if: ${{ matrix.cuda-version != 'cpu' }}
-        run: |
-          source .github/workflows/cuda/${{ matrix.cuda-version }}-${{ runner.os }}-env.sh
-          ./conda/pytorch-cluster/build_conda.sh ${{ matrix.python-version }} ${{ matrix.torch-version }} ${{ matrix.cuda-version }}
-        shell:
-          bash -l {0}
-
-      - name: Publish Conda package on organization channel
-        run: |
-          conda install anaconda-client --yes
-          anaconda upload --force --label main $HOME/conda-bld/*/*.tar.bz2
-        env:
-          ANACONDA_API_TOKEN: ${{ secrets.PYG_ANACONDA_TOKEN }}
-        shell:
-          bash -l {0}
-
-      - name: Publish Conda package
on personal channel - run: | - conda install anaconda-client --yes - anaconda upload --force --label main $HOME/conda-bld/*/*.tar.bz2 - env: - ANACONDA_API_TOKEN: ${{ secrets.RUSTY1S_ANACONDA_TOKEN }} - shell: - bash -l {0} diff --git a/pytorch_cluster-1.6.1/.github/workflows/building.yml b/pytorch_cluster-1.6.1/.github/workflows/building.yml deleted file mode 100644 index ae489f1..0000000 --- a/pytorch_cluster-1.6.1/.github/workflows/building.yml +++ /dev/null @@ -1,106 +0,0 @@ -name: Building Wheels - -on: [workflow_dispatch] - -jobs: - - wheel: - runs-on: ${{ matrix.os }} - - strategy: - fail-fast: false - matrix: - os: [ubuntu-18.04, macos-10.15, windows-2019] - python-version: ['3.7', '3.8', '3.9', '3.10', '3.11'] - torch-version: [1.13.0, 2.0.0] - cuda-version: ['cpu', 'cu116', 'cu117', 'cu118'] - exclude: - - torch-version: 1.13.0 - python-version: '3.11' - - torch-version: 2.0.0 - python-version: '3.7' - - torch-version: 1.13.0 - cuda-version: 'cu118' - - torch-version: 2.0.0 - cuda-version: 'cu116' - - os: macos-10.15 - cuda-version: 'cu116' - - os: macos-10.15 - cuda-version: 'cu117' - - os: macos-10.15 - cuda-version: 'cu118' - - steps: - - uses: actions/checkout@v2 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 - with: - python-version: ${{ matrix.python-version }} - - - name: Upgrade pip - run: | - pip install --upgrade setuptools - pip list - - - name: Free up disk space - if: ${{ runner.os == 'Linux' }} - run: | - sudo rm -rf /usr/share/dotnet - - - name: Install CUDA ${{ matrix.cuda-version }} - if: ${{ matrix.cuda-version != 'cpu' }} - run: | - bash .github/workflows/cuda/${{ matrix.cuda-version }}-${{ runner.os }}.sh - - - name: Install PyTorch ${{ matrix.torch-version }}+${{ matrix.cuda-version }} - run: | - pip install torch==${{ matrix.torch-version }} --extra-index-url https://download.pytorch.org/whl/${{ matrix.cuda-version }} - python -c "import torch; print('PyTorch:', torch.__version__)" - python -c "import torch; print('CUDA:', torch.version.cuda)" - - - name: Set version - if: ${{ runner.os != 'macOS' }} - run: | - VERSION=`sed -n "s/^__version__ = '\(.*\)'/\1/p" torch_cluster/__init__.py` - TORCH_VERSION=`echo "pt${{ matrix.torch-version }}" | sed "s/..$//" | sed "s/\.//g"` - CUDA_VERSION=`echo ${{ matrix.cuda-version }}` - echo "New version name: $VERSION+$TORCH_VERSION$CUDA_VERSION" - sed -i "s/$VERSION/$VERSION+$TORCH_VERSION$CUDA_VERSION/" setup.py - sed -i "s/$VERSION/$VERSION+$TORCH_VERSION$CUDA_VERSION/" torch_cluster/__init__.py - shell: - bash - - - name: Install main package for CPU - if: ${{ matrix.cuda-version == 'cpu' }} - run: | - FORCE_ONLY_CPU=1 pip install -e . - shell: - bash - - - name: Install main package for GPU - if: ${{ matrix.cuda-version != 'cpu' }} - run: | - source .github/workflows/cuda/${{ matrix.cuda-version }}-${{ runner.os }}-env.sh - pip install -e . 
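-          # (the `*-env.sh` script sourced above exports FORCE_CUDA=1 and
-          # TORCH_CUDA_ARCH_LIST, which the pip build reads to compile and
-          # target the CUDA extensions)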
- shell: - bash - - - name: Test installation - run: | - python -c "import torch_cluster; print('torch-cluster:', torch_cluster.__version__)" - - - name: Build wheel - run: | - pip install wheel - python setup.py bdist_wheel --dist-dir=dist - - - name: Configure AWS - uses: aws-actions/configure-aws-credentials@v1 - with: - aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} - aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - aws-region: us-west-1 - - - name: Upload wheel - run: | - aws s3 sync dist s3://data.pyg.org/whl/torch-${{ matrix.torch-version }}+${{ matrix.cuda-version }} --grants read=uri=http://acs.amazonaws.com/groups/global/AllUsers diff --git a/pytorch_cluster-1.6.1/.github/workflows/cuda/cu117-Linux.sh b/pytorch_cluster-1.6.1/.github/workflows/cuda/cu117-Linux.sh deleted file mode 100644 index 0593f69..0000000 --- a/pytorch_cluster-1.6.1/.github/workflows/cuda/cu117-Linux.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - -OS=ubuntu1804 - -wget -nv https://developer.download.nvidia.com/compute/cuda/repos/${OS}/x86_64/cuda-${OS}.pin -sudo mv cuda-${OS}.pin /etc/apt/preferences.d/cuda-repository-pin-600 -wget -nv https://developer.download.nvidia.com/compute/cuda/11.7.1/local_installers/cuda-repo-${OS}-11-7-local_11.7.1-515.65.01-1_amd64.deb -sudo dpkg -i cuda-repo-${OS}-11-7-local_11.7.1-515.65.01-1_amd64.deb -sudo cp /var/cuda-repo-${OS}-11-7-local/cuda-*-keyring.gpg /usr/share/keyrings/ - -sudo apt-get -qq update -sudo apt install cuda-nvcc-11-7 cuda-libraries-dev-11-7 -sudo apt clean - -rm -f https://developer.download.nvidia.com/compute/cuda/11.7.1/local_installers/cuda-repo-${OS}-11-7-local_11.7.1-515.65.01-1_amd64.deb diff --git a/pytorch_cluster-1.6.1/.github/workflows/cuda/cu118-Linux.sh b/pytorch_cluster-1.6.1/.github/workflows/cuda/cu118-Linux.sh deleted file mode 100644 index 000a3bd..0000000 --- a/pytorch_cluster-1.6.1/.github/workflows/cuda/cu118-Linux.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - -OS=ubuntu1804 - -wget -nv https://developer.download.nvidia.com/compute/cuda/repos/${OS}/x86_64/cuda-${OS}.pin -sudo mv cuda-${OS}.pin /etc/apt/preferences.d/cuda-repository-pin-600 -wget -nv https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda-repo-${OS}-11-8-local_11.8.0-520.61.05-1_amd64.deb -sudo dpkg -i cuda-repo-${OS}-11-8-local_11.8.0-520.61.05-1_amd64.deb -sudo cp /var/cuda-repo-${OS}-11-8-local/cuda-*-keyring.gpg /usr/share/keyrings/ - -sudo apt-get -qq update -sudo apt install cuda-nvcc-11-8 cuda-libraries-dev-11-8 -sudo apt clean - -rm -f https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda-repo-${OS}-11-8-local_11.8.0-520.61.05-1_amd64.deb diff --git a/pytorch_cluster-1.6.1/.github/workflows/testing.yml b/pytorch_cluster-1.6.1/.github/workflows/testing.yml deleted file mode 100644 index 6b177df..0000000 --- a/pytorch_cluster-1.6.1/.github/workflows/testing.yml +++ /dev/null @@ -1,44 +0,0 @@ -name: Testing - -on: - push: - branches: - - master - pull_request: - -jobs: - - pytest: - runs-on: ${{ matrix.os }} - - strategy: - fail-fast: false - matrix: - os: [ubuntu-latest, windows-latest] - python-version: [3.8] - torch-version: [1.13.0, 2.0.0] - - steps: - - uses: actions/checkout@v2 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 - with: - python-version: ${{ matrix.python-version }} - - - name: Install PyTorch ${{ matrix.torch-version }} - run: | - pip install torch==${{ matrix.torch-version }} --extra-index-url https://download.pytorch.org/whl/cpu - - - 
name: Install main package - run: | - pip install -e .[test] - - - name: Run test-suite - run: | - pytest --cov --cov-report=xml - - - name: Upload coverage - uses: codecov/codecov-action@v1 - if: success() - with: - fail_ci_if_error: false diff --git a/pytorch_cluster-1.6.1/CMakeLists.txt b/pytorch_cluster-1.6.1/CMakeLists.txt deleted file mode 100644 index e4f8385..0000000 --- a/pytorch_cluster-1.6.1/CMakeLists.txt +++ /dev/null @@ -1,88 +0,0 @@ -cmake_minimum_required(VERSION 3.0) -project(torchcluster) -set(CMAKE_CXX_STANDARD 14) -set(TORCHCLUSTER_VERSION 1.6.1) - -option(WITH_CUDA "Enable CUDA support" OFF) -option(WITH_PYTHON "Link to Python when building" ON) - -if(WITH_CUDA) - enable_language(CUDA) - add_definitions(-D__CUDA_NO_HALF_OPERATORS__) - add_definitions(-DWITH_CUDA) - set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --expt-relaxed-constexpr") -endif() - -if (WITH_PYTHON) - add_definitions(-DWITH_PYTHON) - find_package(Python3 COMPONENTS Development) -endif() -find_package(Torch REQUIRED) - -file(GLOB HEADERS csrc/*.h) -file(GLOB OPERATOR_SOURCES csrc/*.* csrc/cpu/*.*) -if(WITH_CUDA) - file(GLOB OPERATOR_SOURCES ${OPERATOR_SOURCES} csrc/cuda/*.h csrc/cuda/*.cu) -endif() - -add_library(${PROJECT_NAME} SHARED ${OPERATOR_SOURCES}) -target_link_libraries(${PROJECT_NAME} PRIVATE ${TORCH_LIBRARIES}) -if (WITH_PYTHON) - target_link_libraries(${PROJECT_NAME} PRIVATE Python3::Python) -endif() -set_target_properties(${PROJECT_NAME} PROPERTIES EXPORT_NAME TorchCluster) - -target_include_directories(${PROJECT_NAME} INTERFACE - $ - $) - -include(GNUInstallDirs) -include(CMakePackageConfigHelpers) - -set(TORCHCLUSTER_CMAKECONFIG_INSTALL_DIR "share/cmake/TorchCluster" CACHE STRING "install path for TorchClusterConfig.cmake") - -configure_package_config_file(cmake/TorchClusterConfig.cmake.in - "${CMAKE_CURRENT_BINARY_DIR}/TorchClusterConfig.cmake" - INSTALL_DESTINATION ${TORCHCLUSTER_CMAKECONFIG_INSTALL_DIR}) - -write_basic_package_version_file(${CMAKE_CURRENT_BINARY_DIR}/TorchClusterConfigVersion.cmake - VERSION ${TORCHCLUSTER_VERSION} - COMPATIBILITY AnyNewerVersion) - -install(FILES ${CMAKE_CURRENT_BINARY_DIR}/TorchClusterConfig.cmake - ${CMAKE_CURRENT_BINARY_DIR}/TorchClusterConfigVersion.cmake - DESTINATION ${TORCHCLUSTER_CMAKECONFIG_INSTALL_DIR}) - -install(TARGETS ${PROJECT_NAME} - EXPORT TorchClusterTargets - LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} - ) - -install(EXPORT TorchClusterTargets - NAMESPACE TorchCluster:: - DESTINATION ${TORCHCLUSTER_CMAKECONFIG_INSTALL_DIR}) - -install(FILES ${HEADERS} DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/${PROJECT_NAME}) -install(FILES - csrc/cpu/fps_cpu.h - csrc/cpu/graclus_cpu.h - csrc/cpu/grid_cpu.h - csrc/cpu/rw_cpu.h - csrc/cpu/sampler_cpu.h - DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/${PROJECT_NAME}/cpu) -if(WITH_CUDA) - install(FILES - csrc/cuda/fps_cuda.h - csrc/cuda/graclus_cuda.h - csrc/cuda/grid_cuda.h - csrc/cuda/knn_cuda.h - csrc/cuda/nearest_cuda.h - csrc/cuda/radius_cuda.h - csrc/cuda/rw_cuda.h - DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/${PROJECT_NAME}/cuda) -endif() - -if(WITH_CUDA) - set_property(TARGET torch_cuda PROPERTY INTERFACE_COMPILE_OPTIONS "") - set_property(TARGET torch_cpu PROPERTY INTERFACE_COMPILE_OPTIONS "") -endif() diff --git a/pytorch_cluster-1.6.1/README.md b/pytorch_cluster-1.6.1/README.md deleted file mode 100644 index 61efba6..0000000 --- a/pytorch_cluster-1.6.1/README.md +++ /dev/null @@ -1,294 +0,0 @@ -[pypi-image]: https://badge.fury.io/py/torch-cluster.svg -[pypi-url]: https://pypi.python.org/pypi/torch-cluster 
-[testing-image]: https://github.com/rusty1s/pytorch_cluster/actions/workflows/testing.yml/badge.svg
-[testing-url]: https://github.com/rusty1s/pytorch_cluster/actions/workflows/testing.yml
-[linting-image]: https://github.com/rusty1s/pytorch_cluster/actions/workflows/linting.yml/badge.svg
-[linting-url]: https://github.com/rusty1s/pytorch_cluster/actions/workflows/linting.yml
-[coverage-image]: https://codecov.io/gh/rusty1s/pytorch_cluster/branch/master/graph/badge.svg
-[coverage-url]: https://codecov.io/github/rusty1s/pytorch_cluster?branch=master
-
-# PyTorch Cluster
-
-[![PyPI Version][pypi-image]][pypi-url]
-[![Testing Status][testing-image]][testing-url]
-[![Linting Status][linting-image]][linting-url]
-[![Code Coverage][coverage-image]][coverage-url]
-
--------------------------------------------------------------------------------
-
-This package consists of a small extension library of highly optimized graph cluster algorithms for use in [PyTorch](http://pytorch.org/).
-It provides the following clustering algorithms:
-
-* **[Graclus](#graclus)** from Dhillon *et al.*: [Weighted Graph Cuts without Eigenvectors: A Multilevel Approach](http://www.cs.utexas.edu/users/inderjit/public_papers/multilevel_pami.pdf) (PAMI 2007)
-* **[Voxel Grid Pooling](#voxelgrid)** from, *e.g.*, Simonovsky and Komodakis: [Dynamic Edge-Conditioned Filters in Convolutional Neural Networks on Graphs](https://arxiv.org/abs/1704.02901) (CVPR 2017)
-* **[Iterative Farthest Point Sampling](#farthestpointsampling)** from, *e.g.*, Qi *et al.*: [PointNet++: Deep Hierarchical Feature Learning on Point Sets in a Metric Space](https://arxiv.org/abs/1706.02413) (NIPS 2017)
-* **[k-NN](#knn-graph)** and **[Radius](#radius-graph)** graph generation
-* Clustering based on **[Nearest](#nearest)** points
-* **[Random Walk Sampling](#randomwalk-sampling)** from, *e.g.*, Grover and Leskovec: [node2vec: Scalable Feature Learning for Networks](https://arxiv.org/abs/1607.00653) (KDD 2016)
-
-All included operations work on varying data types and are implemented both for CPU and GPU.
-
-## Installation
-
-### Anaconda
-
-**Update:** You can now install `pytorch-cluster` via [Anaconda](https://anaconda.org/pyg/pytorch-cluster) for all major OS/PyTorch/CUDA combinations 🤗
-Given that you have [`pytorch >= 1.8.0` installed](https://pytorch.org/get-started/locally/), simply run
-
-```
-conda install pytorch-cluster -c pyg
-```
-
-### Binaries
-
-We alternatively provide pip wheels for all major OS/PyTorch/CUDA combinations, see [here](https://data.pyg.org/whl).
-
-#### PyTorch 2.0
-
-To install the binaries for PyTorch 2.0.0, simply run
-
-```
-pip install torch-cluster -f https://data.pyg.org/whl/torch-2.0.0+${CUDA}.html
-```
-
-where `${CUDA}` should be replaced by either `cpu`, `cu117`, or `cu118` depending on your PyTorch installation.
-
-| | `cpu` | `cu117` | `cu118` |
-|-------------|-------|---------|---------|
-| **Linux** | ✅ | ✅ | ✅ |
-| **Windows** | ✅ | ✅ | ✅ |
-| **macOS** | ✅ | | |
-
-#### PyTorch 1.13
-
-To install the binaries for PyTorch 1.13.0, simply run
-
-```
-pip install torch-cluster -f https://data.pyg.org/whl/torch-1.13.0+${CUDA}.html
-```
-
-where `${CUDA}` should be replaced by either `cpu`, `cu116`, or `cu117` depending on your PyTorch installation.
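-For instance, assuming a PyTorch 1.13.0 build with CUDA 11.7 (replace `cu117` with your own variant):
-
-```
-pip install torch-cluster -f https://data.pyg.org/whl/torch-1.13.0+cu117.html
-```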
-
-| | `cpu` | `cu116` | `cu117` |
-|-------------|-------|---------|---------|
-| **Linux** | ✅ | ✅ | ✅ |
-| **Windows** | ✅ | ✅ | ✅ |
-| **macOS** | ✅ | | |
-
-**Note:** Binaries of older versions are also provided for PyTorch 1.4.0, PyTorch 1.5.0, PyTorch 1.6.0, PyTorch 1.7.0/1.7.1, PyTorch 1.8.0/1.8.1, PyTorch 1.9.0, PyTorch 1.10.0/1.10.1/1.10.2, PyTorch 1.11.0 and PyTorch 1.12.0/1.12.1 (following the same procedure).
-For older versions, you need to explicitly specify the latest supported version number or install via `pip install --no-index` in order to prevent a manual installation from source.
-You can look up the latest supported version number [here](https://data.pyg.org/whl).
-
-### From source
-
-Ensure that at least PyTorch 1.4.0 is installed and verify that `cuda/bin` and `cuda/include` are in your `$PATH` and `$CPATH` respectively, *e.g.*:
-
-```
-$ python -c "import torch; print(torch.__version__)"
->>> 1.4.0
-
-$ echo $PATH
->>> /usr/local/cuda/bin:...
-
-$ echo $CPATH
->>> /usr/local/cuda/include:...
-```
-
-Then run:
-
-```
-pip install torch-cluster
-```
-
-When running in a docker container without NVIDIA driver, PyTorch needs to evaluate the compute capabilities and may fail.
-In this case, ensure that the compute capabilities are set via `TORCH_CUDA_ARCH_LIST`, *e.g.*:
-
-```
-export TORCH_CUDA_ARCH_LIST="6.0 6.1 7.2+PTX 7.5+PTX"
-```
-
-## Functions
-
-### Graclus
-
-A greedy clustering algorithm of picking an unmarked vertex and matching it with one of its unmarked neighbors (that maximizes its edge weight).
-The GPU algorithm is adapted from Fagginger Auer and Bisseling: [A GPU Algorithm for Greedy Graph Matching](http://www.staff.science.uu.nl/~bisse101/Articles/match12.pdf) (LNCS 2012)
-
-```python
-import torch
-from torch_cluster import graclus_cluster
-
-row = torch.tensor([0, 1, 1, 2])
-col = torch.tensor([1, 0, 2, 1])
-weight = torch.tensor([1., 1., 1., 1.])  # Optional edge weights.
-
-cluster = graclus_cluster(row, col, weight)
-```
-
-```
-print(cluster)
-tensor([0, 0, 1])
-```
-
-### VoxelGrid
-
-A clustering algorithm, which overlays a regular grid of user-defined size over a point cloud and clusters all points within a voxel.
-
-```python
-import torch
-from torch_cluster import grid_cluster
-
-pos = torch.tensor([[0., 0.], [11., 9.], [2., 8.], [2., 2.], [8., 3.]])
-size = torch.Tensor([5, 5])
-
-cluster = grid_cluster(pos, size)
-```
-
-```
-print(cluster)
-tensor([0, 5, 3, 0, 1])
-```
-
-### FarthestPointSampling
-
-A sampling algorithm, which iteratively samples the most distant point with regard to the remaining points.
-
-```python
-import torch
-from torch_cluster import fps
-
-x = torch.tensor([[-1., -1.], [-1., 1.], [1., -1.], [1., 1.]])
-batch = torch.tensor([0, 0, 0, 0])
-index = fps(x, batch, ratio=0.5, random_start=False)
-```
-
-```
-print(index)
-tensor([0, 3])
-```
-
-### kNN-Graph
-
-Computes graph edges to the nearest *k* points.
-
-**Args:**
-
-* **x** *(Tensor)*: Node feature matrix of shape `[N, F]`.
-* **k** *(int)*: The number of neighbors.
-* **batch** *(LongTensor, optional)*: Batch vector of shape `[N]`, which assigns each node to a specific example. `batch` needs to be sorted. (default: `None`)
-* **loop** *(bool, optional)*: If `True`, the graph will contain self-loops. (default: `False`)
-* **flow** *(string, optional)*: The flow direction when used in combination with message passing (`"source_to_target"` or `"target_to_source"`).
(default: `"source_to_target"`) -* **cosine** *(boolean, optional)*: If `True`, will use the Cosine distance instead of Euclidean distance to find nearest neighbors. (default: `False`) -* **num_workers** *(int)*: Number of workers to use for computation. Has no effect in case `batch` is not `None`, or the input lies on the GPU. (default: `1`) - -```python -import torch -from torch_cluster import knn_graph - -x = torch.tensor([[-1., -1.], [-1., 1.], [1., -1.], [1., 1.]]) -batch = torch.tensor([0, 0, 0, 0]) -edge_index = knn_graph(x, k=2, batch=batch, loop=False) -``` - -``` -print(edge_index) -tensor([[1, 2, 0, 3, 0, 3, 1, 2], - [0, 0, 1, 1, 2, 2, 3, 3]]) -``` - -### Radius-Graph - -Computes graph edges to all points within a given distance. - -**Args:** - -* **x** *(Tensor)*: Node feature matrix of shape `[N, F]`. -* **r** *(float)*: The radius. -* **batch** *(LongTensor, optional)*: Batch vector of shape `[N]`, which assigns each node to a specific example. `batch` needs to be sorted. (default: `None`) -* **loop** *(bool, optional)*: If `True`, the graph will contain self-loops. (default: `False`) -* **max_num_neighbors** *(int, optional)*: The maximum number of neighbors to return for each element. If the number of actual neighbors is greater than `max_num_neighbors`, returned neighbors are picked randomly. (default: `32`) -* **flow** *(string, optional)*: The flow direction when using in combination with message passing (`"source_to_target"` or `"target_to_source"`). (default: `"source_to_target"`) -* **num_workers** *(int)*: Number of workers to use for computation. Has no effect in case `batch` is not `None`, or the input lies on the GPU. (default: `1`) - -```python -import torch -from torch_cluster import radius_graph - -x = torch.tensor([[-1., -1.], [-1., 1.], [1., -1.], [1., 1.]]) -batch = torch.tensor([0, 0, 0, 0]) -edge_index = radius_graph(x, r=2.5, batch=batch, loop=False) -``` - -``` -print(edge_index) -tensor([[1, 2, 0, 3, 0, 3, 1, 2], - [0, 0, 1, 1, 2, 2, 3, 3]]) -``` - -### Nearest - -Clusters points in *x* together which are nearest to a given query point in *y*. -`batch_{x,y}` vectors need to be sorted. - -```python -import torch -from torch_cluster import nearest - -x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]]) -batch_x = torch.tensor([0, 0, 0, 0]) -y = torch.Tensor([[-1, 0], [1, 0]]) -batch_y = torch.tensor([0, 0]) -cluster = nearest(x, y, batch_x, batch_y) -``` - -``` -print(cluster) -tensor([0, 0, 1, 1]) -``` - -### RandomWalk-Sampling - -Samples random walks of length `walk_length` from all node indices in `start` in the graph given by `(row, col)`. - -```python -import torch -from torch_cluster import random_walk - -row = torch.tensor([0, 1, 1, 1, 2, 2, 3, 3, 4, 4]) -col = torch.tensor([1, 0, 2, 3, 1, 4, 1, 4, 2, 3]) -start = torch.tensor([0, 1, 2, 3, 4]) - -walk = random_walk(row, col, start, walk_length=3) -``` - -``` -print(walk) -tensor([[0, 1, 2, 4], - [1, 3, 4, 2], - [2, 4, 2, 1], - [3, 4, 2, 4], - [4, 3, 1, 0]]) -``` - -## Running tests - -``` -pytest -``` - -## C++ API - -`torch-cluster` also offers a C++ API that contains C++ equivalent of python models. - -``` -export Torch_DIR=`python -c 'import torch;print(torch.utils.cmake_prefix_path)'` -mkdir build -cd build -# Add -DWITH_CUDA=on support for the CUDA if needed -cmake .. 
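-# (re-run cmake with -DCMAKE_INSTALL_PREFIX=<dir> to change where
-#  `make install` places the library; this flag is standard CMake)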
-make -make install -``` diff --git a/pytorch_cluster-1.6.1/conda/pytorch-cluster/README.md b/pytorch_cluster-1.6.1/conda/pytorch-cluster/README.md deleted file mode 100644 index 6207e22..0000000 --- a/pytorch_cluster-1.6.1/conda/pytorch-cluster/README.md +++ /dev/null @@ -1,3 +0,0 @@ -``` -./build_conda.sh 3.9 2.0.0 cu117 # python, pytorch and cuda version -``` diff --git a/pytorch_cluster-1.6.1/conda/pytorch-cluster/build_conda.sh b/pytorch_cluster-1.6.1/conda/pytorch-cluster/build_conda.sh deleted file mode 100644 index 21d94c2..0000000 --- a/pytorch_cluster-1.6.1/conda/pytorch-cluster/build_conda.sh +++ /dev/null @@ -1,56 +0,0 @@ -#!/bin/bash - -export PYTHON_VERSION=$1 -export TORCH_VERSION=$2 -export CUDA_VERSION=$3 - -export CONDA_PYTORCH_CONSTRAINT="pytorch==${TORCH_VERSION%.*}.*" - -if [ "${CUDA_VERSION}" = "cpu" ]; then - export CONDA_CUDATOOLKIT_CONSTRAINT="cpuonly # [not osx]" -else - case $CUDA_VERSION in - cu118) - export CONDA_CUDATOOLKIT_CONSTRAINT="pytorch-cuda==11.8.*" - ;; - cu117) - export CONDA_CUDATOOLKIT_CONSTRAINT="pytorch-cuda==11.7.*" - ;; - cu116) - if [ "${TORCH_VERSION}" = "1.12.0" ]; then - export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit==11.6.*" - else - export CONDA_CUDATOOLKIT_CONSTRAINT="pytorch-cuda==11.6.*" - fi - ;; - cu115) - export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit==11.5.*" - ;; - cu113) - export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit==11.3.*" - ;; - cu111) - export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit==11.1.*" - ;; - cu102) - export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit==10.2.*" - ;; - cu101) - export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit==10.1.*" - ;; - *) - echo "Unrecognized CUDA_VERSION=$CUDA_VERSION" - exit 1 - ;; - esac -fi - -echo "PyTorch $TORCH_VERSION+$CUDA_VERSION" -echo "- $CONDA_PYTORCH_CONSTRAINT" -echo "- $CONDA_CUDATOOLKIT_CONSTRAINT" - -if [ "${TORCH_VERSION}" = "1.12.0" ] && [ "${CUDA_VERSION}" = "cu116" ]; then - conda build . -c pytorch -c default -c nvidia -c conda-forge --output-folder "$HOME/conda-bld" -else - conda build . -c pytorch -c default -c nvidia --output-folder "$HOME/conda-bld" -fi diff --git a/pytorch_cluster-1.6.1/conda/pytorch-cluster/meta.yaml b/pytorch_cluster-1.6.1/conda/pytorch-cluster/meta.yaml deleted file mode 100644 index 897cb5c..0000000 --- a/pytorch_cluster-1.6.1/conda/pytorch-cluster/meta.yaml +++ /dev/null @@ -1,39 +0,0 @@ -package: - name: pytorch-cluster - version: 1.6.1 - -source: - path: ../.. - -requirements: - build: - - {{ compiler('c') }} # [win] - - host: - - pip - - python {{ environ.get('PYTHON_VERSION') }} - - {{ environ.get('CONDA_PYTORCH_CONSTRAINT') }} - - {{ environ.get('CONDA_CUDATOOLKIT_CONSTRAINT') }} - - run: - - python {{ environ.get('PYTHON_VERSION') }} - - {{ environ.get('CONDA_PYTORCH_CONSTRAINT') }} - - {{ environ.get('CONDA_CUDATOOLKIT_CONSTRAINT') }} - -build: - string: py{{ environ.get('PYTHON_VERSION').replace('.', '') }}_torch_{{ environ['TORCH_VERSION'] }}_{{ environ['CUDA_VERSION'] }} - script: pip install . 
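-  # script_env forwards the variables below from the surrounding shell into
-  # the conda-build environment, so CI's CUDA settings reach `pip install .`: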
- script_env: - - FORCE_CUDA - - TORCH_CUDA_ARCH_LIST - -test: - imports: - - torch_cluster - requires: - - scipy - -about: - home: https://github.com/rusty1s/pytorch_cluster - license: MIT - summary: PyTorch Extension Library of Optimized Graph Cluster Algorithms diff --git a/pytorch_cluster-1.6.1/setup.cfg b/pytorch_cluster-1.6.1/setup.cfg deleted file mode 100644 index dafda37..0000000 --- a/pytorch_cluster-1.6.1/setup.cfg +++ /dev/null @@ -1,19 +0,0 @@ -[metadata] -long_description=file: README.md -long_description_content_type=text/markdown - -classifiers = - Development Status :: 5 - Production/Stable - License :: OSI Approved :: MIT License - Programming Language :: Python - Programming Language :: Python :: 3.7 - Programming Language :: Python :: 3.8 - Programming Language :: Python :: 3.9 - Programming Language :: Python :: 3.10 - Programming Language :: Python :: 3 :: Only - -[aliases] -test = pytest - -[tool:pytest] -addopts = --capture=no diff --git a/pytorch_cluster-1.6.1/setup.py b/pytorch_cluster-1.6.1/setup.py deleted file mode 100644 index 0b8c798..0000000 --- a/pytorch_cluster-1.6.1/setup.py +++ /dev/null @@ -1,150 +0,0 @@ -import glob -import os -import os.path as osp -import platform -import sys -from itertools import product - -import torch -from setuptools import find_packages, setup -from torch.__config__ import parallel_info -from torch.utils.cpp_extension import (CUDA_HOME, BuildExtension, CppExtension, - CUDAExtension) - -__version__ = '1.6.1' -URL = 'https://github.com/rusty1s/pytorch_cluster' - -WITH_CUDA = False -if torch.cuda.is_available(): - WITH_CUDA = CUDA_HOME is not None or torch.version.hip - -suffices = ['cpu', 'cuda'] if WITH_CUDA else ['cpu'] -if os.getenv('FORCE_CUDA', '0') == '1': - suffices = ['cuda', 'cpu'] -if os.getenv('FORCE_ONLY_CUDA', '0') == '1': - suffices = ['cuda'] -if os.getenv('FORCE_ONLY_CPU', '0') == '1': - suffices = ['cpu'] - -BUILD_DOCS = os.getenv('BUILD_DOCS', '0') == '1' - - -def get_extensions(): - extensions = [] - - extensions_dir = osp.join('csrc') - main_files = glob.glob(osp.join(extensions_dir, '*.cpp')) - # remove generated 'hip' files, in case of rebuilds - main_files = [path for path in main_files if 'hip' not in path] - - for main, suffix in product(main_files, suffices): - define_macros = [('WITH_PYTHON', None)] - undef_macros = [] - - if sys.platform == 'win32': - define_macros += [('torchcluster_EXPORTS', None)] - - extra_compile_args = {'cxx': ['-O2']} - if not os.name == 'nt': # Not on Windows: - extra_compile_args['cxx'] += ['-Wno-sign-compare'] - extra_link_args = ['-s'] - - info = parallel_info() - if ('backend: OpenMP' in info and 'OpenMP not found' not in info - and sys.platform != 'darwin'): - extra_compile_args['cxx'] += ['-DAT_PARALLEL_OPENMP'] - if sys.platform == 'win32': - extra_compile_args['cxx'] += ['/openmp'] - else: - extra_compile_args['cxx'] += ['-fopenmp'] - else: - print('Compiling without OpenMP...') - - # Compile for mac arm64 - if (sys.platform == 'darwin' and platform.machine() == 'arm64'): - extra_compile_args['cxx'] += ['-arch', 'arm64'] - extra_link_args += ['-arch', 'arm64'] - - if suffix == 'cuda': - define_macros += [('WITH_CUDA', None)] - nvcc_flags = os.getenv('NVCC_FLAGS', '') - nvcc_flags = [] if nvcc_flags == '' else nvcc_flags.split(' ') - nvcc_flags += ['-O2'] - extra_compile_args['nvcc'] = nvcc_flags - - if torch.version.hip: - # USE_ROCM was added to later versions of PyTorch - # Define here to support older PyTorch versions as well: - define_macros += [('USE_ROCM', None)] 
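-                # __HIP_NO_HALF_CONVERSIONS__ may be predefined by the HIP
-                # toolchain; undefining it (below) keeps implicit half-precision
-                # conversions available to these kernels.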
- undef_macros += ['__HIP_NO_HALF_CONVERSIONS__'] - else: - nvcc_flags += ['--expt-relaxed-constexpr'] - - name = main.split(os.sep)[-1][:-4] - sources = [main] - - path = osp.join(extensions_dir, 'cpu', f'{name}_cpu.cpp') - if osp.exists(path): - sources += [path] - - path = osp.join(extensions_dir, 'cuda', f'{name}_cuda.cu') - if suffix == 'cuda' and osp.exists(path): - sources += [path] - - Extension = CppExtension if suffix == 'cpu' else CUDAExtension - extension = Extension( - f'torch_cluster._{name}_{suffix}', - sources, - include_dirs=[extensions_dir], - define_macros=define_macros, - undef_macros=undef_macros, - extra_compile_args=extra_compile_args, - extra_link_args=extra_link_args, - ) - extensions += [extension] - - return extensions - - -install_requires = [ - 'scipy', -] - -test_requires = [ - 'pytest', - 'pytest-cov', -] - -# work-around hipify abs paths -include_package_data = True -if torch.cuda.is_available() and torch.version.hip: - include_package_data = False - -setup( - name='torch_cluster', - version=__version__, - description=('PyTorch Extension Library of Optimized Graph Cluster ' - 'Algorithms'), - author='Matthias Fey', - author_email='matthias.fey@tu-dortmund.de', - url=URL, - download_url=f'{URL}/archive/{__version__}.tar.gz', - keywords=[ - 'pytorch', - 'geometric-deep-learning', - 'graph-neural-networks', - 'cluster-algorithms', - ], - python_requires='>=3.7', - install_requires=install_requires, - extras_require={ - 'test': test_requires, - }, - ext_modules=get_extensions() if not BUILD_DOCS else [], - cmdclass={ - 'build_ext': - BuildExtension.with_options(no_python_abi_suffix=True, use_ninja=False) - }, - packages=find_packages(), - include_package_data=include_package_data, -) diff --git a/pytorch_cluster-1.6.1/test/test_knn.py b/pytorch_cluster-1.6.1/test/test_knn.py deleted file mode 100644 index 8113a54..0000000 --- a/pytorch_cluster-1.6.1/test/test_knn.py +++ /dev/null @@ -1,79 +0,0 @@ -from itertools import product - -import pytest -import scipy.spatial -import torch -from torch_cluster import knn, knn_graph -from torch_cluster.testing import devices, grad_dtypes, tensor - - -def to_set(edge_index): - return set([(i, j) for i, j in edge_index.t().tolist()]) - - -@pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices)) -def test_knn(dtype, device): - x = tensor([ - [-1, -1], - [-1, +1], - [+1, +1], - [+1, -1], - [-1, -1], - [-1, +1], - [+1, +1], - [+1, -1], - ], dtype, device) - y = tensor([ - [1, 0], - [-1, 0], - ], dtype, device) - - batch_x = tensor([0, 0, 0, 0, 1, 1, 1, 1], torch.long, device) - batch_y = tensor([0, 1], torch.long, device) - - edge_index = knn(x, y, 2) - assert to_set(edge_index) == set([(0, 2), (0, 3), (1, 0), (1, 1)]) - - edge_index = knn(x, y, 2, batch_x, batch_y) - assert to_set(edge_index) == set([(0, 2), (0, 3), (1, 4), (1, 5)]) - - if x.is_cuda: - edge_index = knn(x, y, 2, batch_x, batch_y, cosine=True) - assert to_set(edge_index) == set([(0, 2), (0, 3), (1, 4), (1, 5)]) - - # Skipping a batch - batch_x = tensor([0, 0, 0, 0, 2, 2, 2, 2], torch.long, device) - batch_y = tensor([0, 2], torch.long, device) - edge_index = knn(x, y, 2, batch_x, batch_y) - assert to_set(edge_index) == set([(0, 2), (0, 3), (1, 4), (1, 5)]) - - -@pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices)) -def test_knn_graph(dtype, device): - x = tensor([ - [-1, -1], - [-1, +1], - [+1, +1], - [+1, -1], - ], dtype, device) - - edge_index = knn_graph(x, k=2, flow='target_to_source') - assert to_set(edge_index) == set([(0, 
1), (0, 3), (1, 0), (1, 2), (2, 1), - (2, 3), (3, 0), (3, 2)]) - - edge_index = knn_graph(x, k=2, flow='source_to_target') - assert to_set(edge_index) == set([(1, 0), (3, 0), (0, 1), (2, 1), (1, 2), - (3, 2), (0, 3), (2, 3)]) - - -@pytest.mark.parametrize('dtype,device', product([torch.float], devices)) -def test_knn_graph_large(dtype, device): - x = torch.randn(1000, 3, dtype=dtype, device=device) - - edge_index = knn_graph(x, k=5, flow='target_to_source', loop=True) - - tree = scipy.spatial.cKDTree(x.cpu().numpy()) - _, col = tree.query(x.cpu(), k=5) - truth = set([(i, j) for i, ns in enumerate(col) for j in ns]) - - assert to_set(edge_index.cpu()) == truth diff --git a/pytorch_cluster-1.6.1/torch_cluster/__init__.py b/pytorch_cluster-1.6.1/torch_cluster/__init__.py deleted file mode 100644 index 158f59b..0000000 --- a/pytorch_cluster-1.6.1/torch_cluster/__init__.py +++ /dev/null @@ -1,60 +0,0 @@ -import importlib -import os.path as osp - -import torch - -__version__ = '1.6.1' - -for library in [ - '_version', '_grid', '_graclus', '_fps', '_rw', '_sampler', '_nearest', - '_knn', '_radius' -]: - cuda_spec = importlib.machinery.PathFinder().find_spec( - f'{library}_cuda', [osp.dirname(__file__)]) - cpu_spec = importlib.machinery.PathFinder().find_spec( - f'{library}_cpu', [osp.dirname(__file__)]) - spec = cuda_spec or cpu_spec - if spec is not None: - torch.ops.load_library(spec.origin) - else: # pragma: no cover - raise ImportError(f"Could not find module '{library}_cpu' in " - f"{osp.dirname(__file__)}") - -cuda_version = torch.ops.torch_cluster.cuda_version() -if torch.version.cuda is not None and cuda_version != -1: # pragma: no cover - if cuda_version < 10000: - major, minor = int(str(cuda_version)[0]), int(str(cuda_version)[2]) - else: - major, minor = int(str(cuda_version)[0:2]), int(str(cuda_version)[3]) - t_major, t_minor = [int(x) for x in torch.version.cuda.split('.')] - - if t_major != major: - raise RuntimeError( - f'Detected that PyTorch and torch_cluster were compiled with ' - f'different CUDA versions. PyTorch has CUDA version ' - f'{t_major}.{t_minor} and torch_cluster has CUDA version ' - f'{major}.{minor}. 
Please reinstall the torch_cluster that ' - f'matches your PyTorch install.') - -from .fps import fps # noqa -from .graclus import graclus_cluster # noqa -from .grid import grid_cluster # noqa -from .knn import knn, knn_graph # noqa -from .nearest import nearest # noqa -from .radius import radius, radius_graph # noqa -from .rw import random_walk # noqa -from .sampler import neighbor_sampler # noqa - -__all__ = [ - 'graclus_cluster', - 'grid_cluster', - 'fps', - 'nearest', - 'knn', - 'knn_graph', - 'radius', - 'radius_graph', - 'random_walk', - 'neighbor_sampler', - '__version__', -] diff --git a/pytorch_cluster-1.6.1/torch_cluster/fps.py b/pytorch_cluster-1.6.1/torch_cluster/fps.py deleted file mode 100644 index 7901dd5..0000000 --- a/pytorch_cluster-1.6.1/torch_cluster/fps.py +++ /dev/null @@ -1,70 +0,0 @@ -from typing import Optional - -import torch -from torch import Tensor - - -@torch.jit._overload # noqa -def fps(src, batch=None, ratio=None, random_start=True): # noqa - # type: (Tensor, Optional[Tensor], Optional[float], bool) -> Tensor - pass # pragma: no cover - - -@torch.jit._overload # noqa -def fps(src, batch=None, ratio=None, random_start=True): # noqa - # type: (Tensor, Optional[Tensor], Optional[Tensor], bool) -> Tensor - pass # pragma: no cover - - -def fps(src: torch.Tensor, batch=None, ratio=None, random_start=True): # noqa - r""""A sampling algorithm from the `"PointNet++: Deep Hierarchical Feature - Learning on Point Sets in a Metric Space" - `_ paper, which iteratively samples the - most distant point with regard to the rest points. - - Args: - src (Tensor): Point feature matrix - :math:`\mathbf{X} \in \mathbb{R}^{N \times F}`. - batch (LongTensor, optional): Batch vector - :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each - node to a specific example. (default: :obj:`None`) - ratio (float or Tensor, optional): Sampling ratio. - (default: :obj:`0.5`) - random_start (bool, optional): If set to :obj:`False`, use the first - node in :math:`\mathbf{X}` as starting node. (default: obj:`True`) - - :rtype: :class:`LongTensor` - - - .. code-block:: python - - import torch - from torch_cluster import fps - - src = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]]) - batch = torch.tensor([0, 0, 0, 0]) - index = fps(src, batch, ratio=0.5) - """ - - r: Optional[Tensor] = None - if ratio is None: - r = torch.tensor(0.5, dtype=src.dtype, device=src.device) - elif isinstance(ratio, float): - r = torch.tensor(ratio, dtype=src.dtype, device=src.device) - else: - r = ratio - assert r is not None - - if batch is not None: - assert src.size(0) == batch.numel() - batch_size = int(batch.max()) + 1 - - deg = src.new_zeros(batch_size, dtype=torch.long) - deg.scatter_add_(0, batch, torch.ones_like(batch)) - - ptr = deg.new_zeros(batch_size + 1) - torch.cumsum(deg, 0, out=ptr[1:]) - else: - ptr = torch.tensor([0, src.size(0)], device=src.device) - - return torch.ops.torch_cluster.fps(src, ptr, r, random_start) diff --git a/pytorch_cluster-1.6.1/torch_cluster/knn.py b/pytorch_cluster-1.6.1/torch_cluster/knn.py deleted file mode 100644 index b981c46..0000000 --- a/pytorch_cluster-1.6.1/torch_cluster/knn.py +++ /dev/null @@ -1,127 +0,0 @@ -from typing import Optional - -import torch - - -@torch.jit.script -def knn(x: torch.Tensor, y: torch.Tensor, k: int, - batch_x: Optional[torch.Tensor] = None, - batch_y: Optional[torch.Tensor] = None, cosine: bool = False, - num_workers: int = 1) -> torch.Tensor: - r"""Finds for each element in :obj:`y` the :obj:`k` nearest points in - :obj:`x`. 
- - Args: - x (Tensor): Node feature matrix - :math:`\mathbf{X} \in \mathbb{R}^{N \times F}`. - y (Tensor): Node feature matrix - :math:`\mathbf{X} \in \mathbb{R}^{M \times F}`. - k (int): The number of neighbors. - batch_x (LongTensor, optional): Batch vector - :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each - node to a specific example. :obj:`batch_x` needs to be sorted. - (default: :obj:`None`) - batch_y (LongTensor, optional): Batch vector - :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^M`, which assigns each - node to a specific example. :obj:`batch_y` needs to be sorted. - (default: :obj:`None`) - cosine (boolean, optional): If :obj:`True`, will use the Cosine - distance instead of the Euclidean distance to find nearest - neighbors. (default: :obj:`False`) - num_workers (int): Number of workers to use for computation. Has no - effect in case :obj:`batch_x` or :obj:`batch_y` is not - :obj:`None`, or the input lies on the GPU. (default: :obj:`1`) - - :rtype: :class:`LongTensor` - - .. code-block:: python - - import torch - from torch_cluster import knn - - x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]]) - batch_x = torch.tensor([0, 0, 0, 0]) - y = torch.Tensor([[-1, 0], [1, 0]]) - batch_y = torch.tensor([0, 0]) - assign_index = knn(x, y, 2, batch_x, batch_y) - """ - if x.numel() == 0 or y.numel() == 0: - return torch.empty(2, 0, dtype=torch.long, device=x.device) - - x = x.view(-1, 1) if x.dim() == 1 else x - y = y.view(-1, 1) if y.dim() == 1 else y - x, y = x.contiguous(), y.contiguous() - - batch_size = 1 - if batch_x is not None: - assert x.size(0) == batch_x.numel() - batch_size = int(batch_x.max()) + 1 - if batch_y is not None: - assert y.size(0) == batch_y.numel() - batch_size = max(batch_size, int(batch_y.max()) + 1) - - ptr_x: Optional[torch.Tensor] = None - ptr_y: Optional[torch.Tensor] = None - if batch_size > 1: - assert batch_x is not None - assert batch_y is not None - arange = torch.arange(batch_size + 1, device=x.device) - ptr_x = torch.bucketize(arange, batch_x) - ptr_y = torch.bucketize(arange, batch_y) - - return torch.ops.torch_cluster.knn(x, y, ptr_x, ptr_y, k, cosine, - num_workers) - - -@torch.jit.script -def knn_graph(x: torch.Tensor, k: int, batch: Optional[torch.Tensor] = None, - loop: bool = False, flow: str = 'source_to_target', - cosine: bool = False, num_workers: int = 1) -> torch.Tensor: - r"""Computes graph edges to the nearest :obj:`k` points. - - Args: - x (Tensor): Node feature matrix - :math:`\mathbf{X} \in \mathbb{R}^{N \times F}`. - k (int): The number of neighbors. - batch (LongTensor, optional): Batch vector - :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each - node to a specific example. :obj:`batch` needs to be sorted. - (default: :obj:`None`) - loop (bool, optional): If :obj:`True`, the graph will contain - self-loops. (default: :obj:`False`) - flow (string, optional): The flow direction when used in combination - with message passing (:obj:`"source_to_target"` or - :obj:`"target_to_source"`). (default: :obj:`"source_to_target"`) - cosine (boolean, optional): If :obj:`True`, will use the Cosine - distance instead of Euclidean distance to find nearest neighbors. - (default: :obj:`False`) - num_workers (int): Number of workers to use for computation. Has no - effect in case :obj:`batch` is not :obj:`None`, or the input lies - on the GPU. (default: :obj:`1`) - - :rtype: :class:`LongTensor` - - .. 
code-block:: python - - import torch - from torch_cluster import knn_graph - - x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]]) - batch = torch.tensor([0, 0, 0, 0]) - edge_index = knn_graph(x, k=2, batch=batch, loop=False) - """ - - assert flow in ['source_to_target', 'target_to_source'] - edge_index = knn(x, x, k if loop else k + 1, batch, batch, cosine, - num_workers) - - if flow == 'source_to_target': - row, col = edge_index[1], edge_index[0] - else: - row, col = edge_index[0], edge_index[1] - - if not loop: - mask = row != col - row, col = row[mask], col[mask] - - return torch.stack([row, col], dim=0) diff --git a/pytorch_cluster-1.6.1/.coveragerc b/pytorch_cluster-1.6.3/.coveragerc similarity index 100% rename from pytorch_cluster-1.6.1/.coveragerc rename to pytorch_cluster-1.6.3/.coveragerc diff --git a/pytorch_cluster-1.6.3/.github/workflows/building-conda.yml b/pytorch_cluster-1.6.3/.github/workflows/building-conda.yml new file mode 100644 index 0000000..5c7e961 --- /dev/null +++ b/pytorch_cluster-1.6.3/.github/workflows/building-conda.yml @@ -0,0 +1,85 @@ +name: Building Conda + +on: [workflow_dispatch] + +jobs: + + conda-build: + runs-on: ${{ matrix.os }} + + strategy: + fail-fast: false + matrix: + # We have trouble building for Windows - drop for now. + os: [ubuntu-20.04, macos-11] # windows-2019 + python-version: ['3.8', '3.9', '3.10', '3.11'] + torch-version: [2.0.0, 2.1.0] + cuda-version: ['cpu', 'cu117', 'cu118', 'cu121'] + exclude: + - torch-version: 2.0.0 + cuda-version: 'cu121' + - torch-version: 2.1.0 + cuda-version: 'cu117' + - os: macos-11 + cuda-version: 'cu117' + - os: macos-11 + cuda-version: 'cu118' + - os: macos-11 + cuda-version: 'cu121' + + steps: + - uses: actions/checkout@v2 + - name: Set up Conda for Python ${{ matrix.python-version }} + uses: conda-incubator/setup-miniconda@v2 + with: + python-version: ${{ matrix.python-version }} + + - name: Free Disk Space (Ubuntu) + if: ${{ runner.os == 'Linux' }} + uses: jlumbroso/free-disk-space@main + + - name: Install Conda packages + run: | + conda install conda-build conda-verify --yes + shell: + bash -l {0} + + - name: Install CUDA ${{ matrix.cuda-version }} + if: ${{ matrix.cuda-version != 'cpu' }} + run: | + bash .github/workflows/cuda/${{ matrix.cuda-version }}-${{ runner.os }}.sh + shell: + bash + + - name: Build Conda package for CPU + if: ${{ matrix.cuda-version == 'cpu' }} + run: | + FORCE_CUDA=0 TORCH_CUDA_ARCH_LIST=0 ./conda/pytorch-cluster/build_conda.sh ${{ matrix.python-version }} ${{ matrix.torch-version }} ${{ matrix.cuda-version }} + shell: + bash -l {0} + + - name: Build Conda package for GPU + if: ${{ matrix.cuda-version != 'cpu' }} + run: | + source .github/workflows/cuda/${{ matrix.cuda-version }}-${{ runner.os }}-env.sh + ./conda/pytorch-cluster/build_conda.sh ${{ matrix.python-version }} ${{ matrix.torch-version }} ${{ matrix.cuda-version }} + shell: + bash -l {0} + + - name: Publish Conda package on organization channel + run: | + conda install anaconda-client --yes + anaconda upload --force --label main $HOME/conda-bld/*/*.tar.bz2 + env: + ANACONDA_API_TOKEN: ${{ secrets.PYG_ANACONDA_TOKEN }} + shell: + bash -l {0} + + - name: Publish Conda package on personal channel + run: | + conda install anaconda-client --yes + anaconda upload --force --label main $HOME/conda-bld/*/*.tar.bz2 + env: + ANACONDA_API_TOKEN: ${{ secrets.RUSTY1S_ANACONDA_TOKEN }} + shell: + bash -l {0} diff --git a/pytorch_cluster-1.6.3/.github/workflows/building.yml 
b/pytorch_cluster-1.6.3/.github/workflows/building.yml new file mode 100644 index 0000000..9e00feb --- /dev/null +++ b/pytorch_cluster-1.6.3/.github/workflows/building.yml @@ -0,0 +1,102 @@ +name: Building Wheels + +on: [workflow_dispatch] + +jobs: + + wheel: + runs-on: ${{ matrix.os }} + + strategy: + fail-fast: false + matrix: + os: [ubuntu-20.04, macos-11, windows-2019] + python-version: ['3.8', '3.9', '3.10', '3.11'] + torch-version: [2.0.0, 2.1.0] + cuda-version: ['cpu', 'cu117', 'cu118', 'cu121'] + exclude: + - torch-version: 2.0.0 + cuda-version: 'cu121' + - torch-version: 2.1.0 + cuda-version: 'cu117' + - os: macos-11 + cuda-version: 'cu117' + - os: macos-11 + cuda-version: 'cu118' + - os: macos-11 + cuda-version: 'cu121' + + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + + - name: Upgrade pip + run: | + pip install --upgrade setuptools + pip install scipy==1.10.1 # Python 3.8 support + pip list + + - name: Free Disk Space (Ubuntu) + if: ${{ runner.os == 'Linux' }} + uses: jlumbroso/free-disk-space@main + + - name: Install CUDA ${{ matrix.cuda-version }} + if: ${{ matrix.cuda-version != 'cpu' }} + run: | + bash .github/workflows/cuda/${{ matrix.cuda-version }}-${{ runner.os }}.sh + + - name: Install PyTorch ${{ matrix.torch-version }}+${{ matrix.cuda-version }} + run: | + pip install torch==${{ matrix.torch-version }} --extra-index-url https://download.pytorch.org/whl/${{ matrix.cuda-version }} + python -c "import torch; print('PyTorch:', torch.__version__)" + python -c "import torch; print('CUDA:', torch.version.cuda)" + + - name: Set version + if: ${{ runner.os != 'macOS' }} + run: | + VERSION=`sed -n "s/^__version__ = '\(.*\)'/\1/p" torch_cluster/__init__.py` + TORCH_VERSION=`echo "pt${{ matrix.torch-version }}" | sed "s/..$//" | sed "s/\.//g"` + CUDA_VERSION=`echo ${{ matrix.cuda-version }}` + echo "New version name: $VERSION+$TORCH_VERSION$CUDA_VERSION" + sed -i "s/$VERSION/$VERSION+$TORCH_VERSION$CUDA_VERSION/" setup.py + sed -i "s/$VERSION/$VERSION+$TORCH_VERSION$CUDA_VERSION/" torch_cluster/__init__.py + shell: + bash + + - name: Install main package for CPU + if: ${{ matrix.cuda-version == 'cpu' }} + run: | + FORCE_ONLY_CPU=1 python setup.py develop + shell: + bash + + - name: Install main package for GPU + if: ${{ matrix.cuda-version != 'cpu' }} + run: | + source .github/workflows/cuda/${{ matrix.cuda-version }}-${{ runner.os }}-env.sh + python setup.py develop + shell: + bash + + - name: Test installation + run: | + python -c "import torch_cluster; print('torch-cluster:', torch_cluster.__version__)" + + - name: Build wheel + run: | + pip install wheel + python setup.py bdist_wheel --dist-dir=dist + + - name: Configure AWS + uses: aws-actions/configure-aws-credentials@v1 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-region: us-west-1 + + - name: Upload wheel + run: | + aws s3 sync dist s3://data.pyg.org/whl/torch-${{ matrix.torch-version }}+${{ matrix.cuda-version }} --grants read=uri=http://acs.amazonaws.com/groups/global/AllUsers diff --git a/pytorch_cluster-1.6.1/.github/workflows/cuda/cu101-Linux-env.sh b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu101-Linux-env.sh similarity index 100% rename from pytorch_cluster-1.6.1/.github/workflows/cuda/cu101-Linux-env.sh rename to pytorch_cluster-1.6.3/.github/workflows/cuda/cu101-Linux-env.sh diff --git 
a/pytorch_cluster-1.6.1/.github/workflows/cuda/cu101-Linux.sh b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu101-Linux.sh similarity index 100% rename from pytorch_cluster-1.6.1/.github/workflows/cuda/cu101-Linux.sh rename to pytorch_cluster-1.6.3/.github/workflows/cuda/cu101-Linux.sh diff --git a/pytorch_cluster-1.6.1/.github/workflows/cuda/cu101-Windows-env.sh b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu101-Windows-env.sh similarity index 100% rename from pytorch_cluster-1.6.1/.github/workflows/cuda/cu101-Windows-env.sh rename to pytorch_cluster-1.6.3/.github/workflows/cuda/cu101-Windows-env.sh diff --git a/pytorch_cluster-1.6.1/.github/workflows/cuda/cu101-Windows.sh b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu101-Windows.sh similarity index 100% rename from pytorch_cluster-1.6.1/.github/workflows/cuda/cu101-Windows.sh rename to pytorch_cluster-1.6.3/.github/workflows/cuda/cu101-Windows.sh diff --git a/pytorch_cluster-1.6.1/.github/workflows/cuda/cu102-Linux-env.sh b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu102-Linux-env.sh similarity index 100% rename from pytorch_cluster-1.6.1/.github/workflows/cuda/cu102-Linux-env.sh rename to pytorch_cluster-1.6.3/.github/workflows/cuda/cu102-Linux-env.sh diff --git a/pytorch_cluster-1.6.1/.github/workflows/cuda/cu102-Linux.sh b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu102-Linux.sh similarity index 100% rename from pytorch_cluster-1.6.1/.github/workflows/cuda/cu102-Linux.sh rename to pytorch_cluster-1.6.3/.github/workflows/cuda/cu102-Linux.sh diff --git a/pytorch_cluster-1.6.1/.github/workflows/cuda/cu102-Windows-env.sh b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu102-Windows-env.sh similarity index 100% rename from pytorch_cluster-1.6.1/.github/workflows/cuda/cu102-Windows-env.sh rename to pytorch_cluster-1.6.3/.github/workflows/cuda/cu102-Windows-env.sh diff --git a/pytorch_cluster-1.6.1/.github/workflows/cuda/cu102-Windows.sh b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu102-Windows.sh similarity index 100% rename from pytorch_cluster-1.6.1/.github/workflows/cuda/cu102-Windows.sh rename to pytorch_cluster-1.6.3/.github/workflows/cuda/cu102-Windows.sh diff --git a/pytorch_cluster-1.6.1/.github/workflows/cuda/cu111-Linux-env.sh b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu111-Linux-env.sh similarity index 100% rename from pytorch_cluster-1.6.1/.github/workflows/cuda/cu111-Linux-env.sh rename to pytorch_cluster-1.6.3/.github/workflows/cuda/cu111-Linux-env.sh diff --git a/pytorch_cluster-1.6.1/.github/workflows/cuda/cu111-Linux.sh b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu111-Linux.sh similarity index 100% rename from pytorch_cluster-1.6.1/.github/workflows/cuda/cu111-Linux.sh rename to pytorch_cluster-1.6.3/.github/workflows/cuda/cu111-Linux.sh diff --git a/pytorch_cluster-1.6.1/.github/workflows/cuda/cu111-Windows-env.sh b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu111-Windows-env.sh similarity index 100% rename from pytorch_cluster-1.6.1/.github/workflows/cuda/cu111-Windows-env.sh rename to pytorch_cluster-1.6.3/.github/workflows/cuda/cu111-Windows-env.sh diff --git a/pytorch_cluster-1.6.1/.github/workflows/cuda/cu111-Windows.sh b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu111-Windows.sh similarity index 100% rename from pytorch_cluster-1.6.1/.github/workflows/cuda/cu111-Windows.sh rename to pytorch_cluster-1.6.3/.github/workflows/cuda/cu111-Windows.sh diff --git a/pytorch_cluster-1.6.1/.github/workflows/cuda/cu113-Linux-env.sh b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu113-Linux-env.sh 
similarity index 100% rename from pytorch_cluster-1.6.1/.github/workflows/cuda/cu113-Linux-env.sh rename to pytorch_cluster-1.6.3/.github/workflows/cuda/cu113-Linux-env.sh diff --git a/pytorch_cluster-1.6.1/.github/workflows/cuda/cu113-Linux.sh b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu113-Linux.sh similarity index 100% rename from pytorch_cluster-1.6.1/.github/workflows/cuda/cu113-Linux.sh rename to pytorch_cluster-1.6.3/.github/workflows/cuda/cu113-Linux.sh diff --git a/pytorch_cluster-1.6.1/.github/workflows/cuda/cu113-Windows-env.sh b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu113-Windows-env.sh similarity index 100% rename from pytorch_cluster-1.6.1/.github/workflows/cuda/cu113-Windows-env.sh rename to pytorch_cluster-1.6.3/.github/workflows/cuda/cu113-Windows-env.sh diff --git a/pytorch_cluster-1.6.1/.github/workflows/cuda/cu113-Windows.sh b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu113-Windows.sh similarity index 100% rename from pytorch_cluster-1.6.1/.github/workflows/cuda/cu113-Windows.sh rename to pytorch_cluster-1.6.3/.github/workflows/cuda/cu113-Windows.sh diff --git a/pytorch_cluster-1.6.1/.github/workflows/cuda/cu115-Linux-env.sh b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu115-Linux-env.sh similarity index 100% rename from pytorch_cluster-1.6.1/.github/workflows/cuda/cu115-Linux-env.sh rename to pytorch_cluster-1.6.3/.github/workflows/cuda/cu115-Linux-env.sh diff --git a/pytorch_cluster-1.6.1/.github/workflows/cuda/cu115-Linux.sh b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu115-Linux.sh similarity index 100% rename from pytorch_cluster-1.6.1/.github/workflows/cuda/cu115-Linux.sh rename to pytorch_cluster-1.6.3/.github/workflows/cuda/cu115-Linux.sh diff --git a/pytorch_cluster-1.6.1/.github/workflows/cuda/cu115-Windows-env.sh b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu115-Windows-env.sh similarity index 100% rename from pytorch_cluster-1.6.1/.github/workflows/cuda/cu115-Windows-env.sh rename to pytorch_cluster-1.6.3/.github/workflows/cuda/cu115-Windows-env.sh diff --git a/pytorch_cluster-1.6.1/.github/workflows/cuda/cu115-Windows.sh b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu115-Windows.sh similarity index 100% rename from pytorch_cluster-1.6.1/.github/workflows/cuda/cu115-Windows.sh rename to pytorch_cluster-1.6.3/.github/workflows/cuda/cu115-Windows.sh diff --git a/pytorch_cluster-1.6.1/.github/workflows/cuda/cu116-Linux-env.sh b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu116-Linux-env.sh similarity index 100% rename from pytorch_cluster-1.6.1/.github/workflows/cuda/cu116-Linux-env.sh rename to pytorch_cluster-1.6.3/.github/workflows/cuda/cu116-Linux-env.sh diff --git a/pytorch_cluster-1.6.1/.github/workflows/cuda/cu116-Linux.sh b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu116-Linux.sh similarity index 100% rename from pytorch_cluster-1.6.1/.github/workflows/cuda/cu116-Linux.sh rename to pytorch_cluster-1.6.3/.github/workflows/cuda/cu116-Linux.sh diff --git a/pytorch_cluster-1.6.1/.github/workflows/cuda/cu116-Windows-env.sh b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu116-Windows-env.sh similarity index 100% rename from pytorch_cluster-1.6.1/.github/workflows/cuda/cu116-Windows-env.sh rename to pytorch_cluster-1.6.3/.github/workflows/cuda/cu116-Windows-env.sh diff --git a/pytorch_cluster-1.6.1/.github/workflows/cuda/cu116-Windows.sh b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu116-Windows.sh similarity index 100% rename from pytorch_cluster-1.6.1/.github/workflows/cuda/cu116-Windows.sh rename to 
pytorch_cluster-1.6.3/.github/workflows/cuda/cu116-Windows.sh diff --git a/pytorch_cluster-1.6.1/.github/workflows/cuda/cu117-Linux-env.sh b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu117-Linux-env.sh similarity index 100% rename from pytorch_cluster-1.6.1/.github/workflows/cuda/cu117-Linux-env.sh rename to pytorch_cluster-1.6.3/.github/workflows/cuda/cu117-Linux-env.sh diff --git a/pytorch_cluster-1.6.3/.github/workflows/cuda/cu117-Linux.sh b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu117-Linux.sh new file mode 100644 index 0000000..d521965 --- /dev/null +++ b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu117-Linux.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +OS=ubuntu2004 + +wget -nv https://developer.download.nvidia.com/compute/cuda/repos/${OS}/x86_64/cuda-${OS}.pin +sudo mv cuda-${OS}.pin /etc/apt/preferences.d/cuda-repository-pin-600 +wget -nv https://developer.download.nvidia.com/compute/cuda/11.7.1/local_installers/cuda-repo-${OS}-11-7-local_11.7.1-515.65.01-1_amd64.deb +sudo dpkg -i cuda-repo-${OS}-11-7-local_11.7.1-515.65.01-1_amd64.deb +sudo cp /var/cuda-repo-${OS}-11-7-local/cuda-*-keyring.gpg /usr/share/keyrings/ + +sudo apt-get -qq update +sudo apt install cuda-nvcc-11-7 cuda-libraries-dev-11-7 +sudo apt clean + +rm -f cuda-repo-${OS}-11-7-local_11.7.1-515.65.01-1_amd64.deb diff --git a/pytorch_cluster-1.6.1/.github/workflows/cuda/cu117-Windows-env.sh b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu117-Windows-env.sh similarity index 100% rename from pytorch_cluster-1.6.1/.github/workflows/cuda/cu117-Windows-env.sh rename to pytorch_cluster-1.6.3/.github/workflows/cuda/cu117-Windows-env.sh diff --git a/pytorch_cluster-1.6.1/.github/workflows/cuda/cu117-Windows.sh b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu117-Windows.sh similarity index 100% rename from pytorch_cluster-1.6.1/.github/workflows/cuda/cu117-Windows.sh rename to pytorch_cluster-1.6.3/.github/workflows/cuda/cu117-Windows.sh diff --git a/pytorch_cluster-1.6.1/.github/workflows/cuda/cu118-Linux-env.sh b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu118-Linux-env.sh similarity index 100% rename from pytorch_cluster-1.6.1/.github/workflows/cuda/cu118-Linux-env.sh rename to pytorch_cluster-1.6.3/.github/workflows/cuda/cu118-Linux-env.sh diff --git a/pytorch_cluster-1.6.3/.github/workflows/cuda/cu118-Linux.sh b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu118-Linux.sh new file mode 100644 index 0000000..46b66e7 --- /dev/null +++ b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu118-Linux.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +OS=ubuntu2004 + +wget -nv https://developer.download.nvidia.com/compute/cuda/repos/${OS}/x86_64/cuda-${OS}.pin +sudo mv cuda-${OS}.pin /etc/apt/preferences.d/cuda-repository-pin-600 +wget -nv https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda-repo-${OS}-11-8-local_11.8.0-520.61.05-1_amd64.deb +sudo dpkg -i cuda-repo-${OS}-11-8-local_11.8.0-520.61.05-1_amd64.deb +sudo cp /var/cuda-repo-${OS}-11-8-local/cuda-*-keyring.gpg /usr/share/keyrings/ + +sudo apt-get -qq update +sudo apt install cuda-nvcc-11-8 cuda-libraries-dev-11-8 +sudo apt clean + +rm -f cuda-repo-${OS}-11-8-local_11.8.0-520.61.05-1_amd64.deb diff --git a/pytorch_cluster-1.6.1/.github/workflows/cuda/cu118-Windows-env.sh b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu118-Windows-env.sh similarity index 100% rename from
pytorch_cluster-1.6.1/.github/workflows/cuda/cu118-Windows-env.sh rename to pytorch_cluster-1.6.3/.github/workflows/cuda/cu118-Windows-env.sh diff --git a/pytorch_cluster-1.6.1/.github/workflows/cuda/cu118-Windows.sh b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu118-Windows.sh similarity index 100% rename from pytorch_cluster-1.6.1/.github/workflows/cuda/cu118-Windows.sh rename to pytorch_cluster-1.6.3/.github/workflows/cuda/cu118-Windows.sh diff --git a/pytorch_cluster-1.6.3/.github/workflows/cuda/cu121-Linux-env.sh b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu121-Linux-env.sh new file mode 100644 index 0000000..b15ae52 --- /dev/null +++ b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu121-Linux-env.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +CUDA_HOME=/usr/local/cuda-12.1 +LD_LIBRARY_PATH=${CUDA_HOME}/lib64:${LD_LIBRARY_PATH} +PATH=${CUDA_HOME}/bin:${PATH} + +export FORCE_CUDA=1 +export TORCH_CUDA_ARCH_LIST="5.0+PTX;6.0;7.0;7.5;8.0;8.6;9.0" diff --git a/pytorch_cluster-1.6.3/.github/workflows/cuda/cu121-Linux.sh b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu121-Linux.sh new file mode 100644 index 0000000..dd8db12 --- /dev/null +++ b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu121-Linux.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +OS=ubuntu2004 + +wget -nv https://developer.download.nvidia.com/compute/cuda/repos/${OS}/x86_64/cuda-${OS}.pin +sudo mv cuda-${OS}.pin /etc/apt/preferences.d/cuda-repository-pin-600 +wget -nv https://developer.download.nvidia.com/compute/cuda/12.1.1/local_installers/cuda-repo-${OS}-12-1-local_12.1.1-530.30.02-1_amd64.deb +sudo dpkg -i cuda-repo-${OS}-12-1-local_12.1.1-530.30.02-1_amd64.deb +sudo cp /var/cuda-repo-${OS}-12-1-local/cuda-*-keyring.gpg /usr/share/keyrings/ + +sudo apt-get -qq update +sudo apt install cuda-nvcc-12-1 cuda-libraries-dev-12-1 +sudo apt clean + +rm -f cuda-repo-${OS}-12-1-local_12.1.1-530.30.02-1_amd64.deb diff --git a/pytorch_cluster-1.6.3/.github/workflows/cuda/cu121-Windows-env.sh b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu121-Windows-env.sh new file mode 100644 index 0000000..c55a035 --- /dev/null +++ b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu121-Windows-env.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +CUDA_HOME=/c/Program\ Files/NVIDIA\ GPU\ Computing\ Toolkit/CUDA/v12.1 +PATH=${CUDA_HOME}/bin:$PATH +PATH=/c/Program\ Files\ \(x86\)/Microsoft\ Visual\ Studio/2017/BuildTools/MSBuild/15.0/Bin:$PATH + +export FORCE_CUDA=1 +export TORCH_CUDA_ARCH_LIST="6.0+PTX" diff --git a/pytorch_cluster-1.6.3/.github/workflows/cuda/cu121-Windows.sh b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu121-Windows.sh new file mode 100644 index 0000000..7df067f --- /dev/null +++ b/pytorch_cluster-1.6.3/.github/workflows/cuda/cu121-Windows.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +# Install NVIDIA drivers, see: +# https://github.com/pytorch/vision/blob/master/packaging/windows/internal/cuda_install.bat#L99-L102 +curl -k -L "https://drive.google.com/u/0/uc?id=1injUyo3lnarMgWyRcXqKg4UGnN0ysmuq&export=download" --output "/tmp/gpu_driver_dlls.zip" +7z x "/tmp/gpu_driver_dlls.zip" -o"/c/Windows/System32" + +export CUDA_SHORT=12.1 +export CUDA_URL=https://developer.download.nvidia.com/compute/cuda/${CUDA_SHORT}.1/local_installers +export CUDA_FILE=cuda_${CUDA_SHORT}.1_531.14_windows.exe + +# Install CUDA: +curl -k -L "${CUDA_URL}/${CUDA_FILE}" --output "${CUDA_FILE}" +echo "" +echo "Installing from ${CUDA_FILE}..."
+PowerShell -Command "Start-Process -FilePath \"${CUDA_FILE}\" -ArgumentList \"-s nvcc_${CUDA_SHORT} cuobjdump_${CUDA_SHORT} nvprune_${CUDA_SHORT} cupti_${CUDA_SHORT} cublas_dev_${CUDA_SHORT} cudart_${CUDA_SHORT} cufft_dev_${CUDA_SHORT} curand_dev_${CUDA_SHORT} cusolver_dev_${CUDA_SHORT} cusparse_dev_${CUDA_SHORT} thrust_${CUDA_SHORT} npp_dev_${CUDA_SHORT} nvrtc_dev_${CUDA_SHORT} nvml_dev_${CUDA_SHORT}\" -Wait -NoNewWindow" +echo "Done!" +rm -f "${CUDA_FILE}" diff --git a/pytorch_cluster-1.6.1/.github/workflows/linting.yml b/pytorch_cluster-1.6.3/.github/workflows/linting.yml similarity index 100% rename from pytorch_cluster-1.6.1/.github/workflows/linting.yml rename to pytorch_cluster-1.6.3/.github/workflows/linting.yml diff --git a/pytorch_cluster-1.6.1/.github/workflows/stale.yml b/pytorch_cluster-1.6.3/.github/workflows/stale.yml similarity index 100% rename from pytorch_cluster-1.6.1/.github/workflows/stale.yml rename to pytorch_cluster-1.6.3/.github/workflows/stale.yml diff --git a/pytorch_cluster-1.6.3/.github/workflows/testing.yml b/pytorch_cluster-1.6.3/.github/workflows/testing.yml new file mode 100644 index 0000000..2a08d95 --- /dev/null +++ b/pytorch_cluster-1.6.3/.github/workflows/testing.yml @@ -0,0 +1,46 @@ +name: Testing + +on: + push: + branches: + - master + pull_request: + +jobs: + + pytest: + runs-on: ${{ matrix.os }} + + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, windows-latest] + python-version: [3.8] + torch-version: [2.0.0, 2.1.0] + + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + + - name: Install PyTorch ${{ matrix.torch-version }} + run: | + pip install torch==${{ matrix.torch-version }} --extra-index-url https://download.pytorch.org/whl/cpu + + - name: Install main package + run: | + pip install scipy==1.10.1 # Python 3.8 support + python setup.py develop + + - name: Run test-suite + run: | + pip install pytest pytest-cov + pytest --cov --cov-report=xml + + - name: Upload coverage + uses: codecov/codecov-action@v1 + if: success() + with: + fail_ci_if_error: false diff --git a/pytorch_cluster-1.6.1/.gitignore b/pytorch_cluster-1.6.3/.gitignore similarity index 100% rename from pytorch_cluster-1.6.1/.gitignore rename to pytorch_cluster-1.6.3/.gitignore diff --git a/pytorch_cluster-1.6.3/CMakeLists.txt b/pytorch_cluster-1.6.3/CMakeLists.txt new file mode 100644 index 0000000..baea7a1 --- /dev/null +++ b/pytorch_cluster-1.6.3/CMakeLists.txt @@ -0,0 +1,88 @@ +cmake_minimum_required(VERSION 3.0) +project(torchcluster) +set(CMAKE_CXX_STANDARD 14) +set(TORCHCLUSTER_VERSION 1.6.3) + +option(WITH_CUDA "Enable CUDA support" OFF) +option(WITH_PYTHON "Link to Python when building" ON) + +if(WITH_CUDA) + enable_language(CUDA) + add_definitions(-D__CUDA_NO_HALF_OPERATORS__) + add_definitions(-DWITH_CUDA) + set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --expt-relaxed-constexpr") +endif() + +if (WITH_PYTHON) + add_definitions(-DWITH_PYTHON) + find_package(Python3 COMPONENTS Development) +endif() +find_package(Torch REQUIRED) + +file(GLOB HEADERS csrc/*.h) +file(GLOB OPERATOR_SOURCES csrc/*.* csrc/cpu/*.*) +if(WITH_CUDA) + file(GLOB OPERATOR_SOURCES ${OPERATOR_SOURCES} csrc/cuda/*.h csrc/cuda/*.cu) +endif() + +add_library(${PROJECT_NAME} SHARED ${OPERATOR_SOURCES}) +target_link_libraries(${PROJECT_NAME} PRIVATE ${TORCH_LIBRARIES}) +if (WITH_PYTHON) + target_link_libraries(${PROJECT_NAME} PRIVATE Python3::Python) +endif() 
+set_target_properties(${PROJECT_NAME} PROPERTIES EXPORT_NAME TorchCluster) + +target_include_directories(${PROJECT_NAME} INTERFACE + "$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>" + $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>) + +include(GNUInstallDirs) +include(CMakePackageConfigHelpers) + +set(TORCHCLUSTER_CMAKECONFIG_INSTALL_DIR "share/cmake/TorchCluster" CACHE STRING "install path for TorchClusterConfig.cmake") + +configure_package_config_file(cmake/TorchClusterConfig.cmake.in + "${CMAKE_CURRENT_BINARY_DIR}/TorchClusterConfig.cmake" + INSTALL_DESTINATION ${TORCHCLUSTER_CMAKECONFIG_INSTALL_DIR}) + +write_basic_package_version_file(${CMAKE_CURRENT_BINARY_DIR}/TorchClusterConfigVersion.cmake + VERSION ${TORCHCLUSTER_VERSION} + COMPATIBILITY AnyNewerVersion) + +install(FILES ${CMAKE_CURRENT_BINARY_DIR}/TorchClusterConfig.cmake + ${CMAKE_CURRENT_BINARY_DIR}/TorchClusterConfigVersion.cmake + DESTINATION ${TORCHCLUSTER_CMAKECONFIG_INSTALL_DIR}) + +install(TARGETS ${PROJECT_NAME} + EXPORT TorchClusterTargets + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ) + +install(EXPORT TorchClusterTargets + NAMESPACE TorchCluster:: + DESTINATION ${TORCHCLUSTER_CMAKECONFIG_INSTALL_DIR}) + +install(FILES ${HEADERS} DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/${PROJECT_NAME}) +install(FILES + csrc/cpu/fps_cpu.h + csrc/cpu/graclus_cpu.h + csrc/cpu/grid_cpu.h + csrc/cpu/rw_cpu.h + csrc/cpu/sampler_cpu.h + DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/${PROJECT_NAME}/cpu) +if(WITH_CUDA) + install(FILES + csrc/cuda/fps_cuda.h + csrc/cuda/graclus_cuda.h + csrc/cuda/grid_cuda.h + csrc/cuda/knn_cuda.h + csrc/cuda/nearest_cuda.h + csrc/cuda/radius_cuda.h + csrc/cuda/rw_cuda.h + DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/${PROJECT_NAME}/cuda) +endif() + +if(WITH_CUDA) + set_property(TARGET torch_cuda PROPERTY INTERFACE_COMPILE_OPTIONS "") + set_property(TARGET torch_cpu PROPERTY INTERFACE_COMPILE_OPTIONS "") +endif() diff --git a/pytorch_cluster-1.6.1/LICENSE b/pytorch_cluster-1.6.3/LICENSE similarity index 100% rename from pytorch_cluster-1.6.1/LICENSE rename to pytorch_cluster-1.6.3/LICENSE diff --git a/pytorch_cluster-1.6.1/MANIFEST.in b/pytorch_cluster-1.6.3/MANIFEST.in similarity index 100% rename from pytorch_cluster-1.6.1/MANIFEST.in rename to pytorch_cluster-1.6.3/MANIFEST.in diff --git a/pytorch_cluster-1.6.3/README.md b/pytorch_cluster-1.6.3/README.md new file mode 100644 index 0000000..ce55d79 --- /dev/null +++ b/pytorch_cluster-1.6.3/README.md @@ -0,0 +1,291 @@ +[pypi-image]: https://badge.fury.io/py/torch-cluster.svg +[pypi-url]: https://pypi.python.org/pypi/torch-cluster +[testing-image]: https://github.com/rusty1s/pytorch_cluster/actions/workflows/testing.yml/badge.svg +[testing-url]: https://github.com/rusty1s/pytorch_cluster/actions/workflows/testing.yml +[linting-image]: https://github.com/rusty1s/pytorch_cluster/actions/workflows/linting.yml/badge.svg +[linting-url]: https://github.com/rusty1s/pytorch_cluster/actions/workflows/linting.yml +[coverage-image]: https://codecov.io/gh/rusty1s/pytorch_cluster/branch/master/graph/badge.svg +[coverage-url]: https://codecov.io/github/rusty1s/pytorch_cluster?branch=master + +# PyTorch Cluster + +[![PyPI Version][pypi-image]][pypi-url] +[![Testing Status][testing-image]][testing-url] +[![Linting Status][linting-image]][linting-url] +[![Code Coverage][coverage-image]][coverage-url] + +-------------------------------------------------------------------------------- + +This package consists of a small extension library of highly optimized graph cluster algorithms for use in [PyTorch](http://pytorch.org/).
+The package consists of the following clustering algorithms: + +* **[Graclus](#graclus)** from Dhillon *et al.*: [Weighted Graph Cuts without Eigenvectors: A Multilevel Approach](http://www.cs.utexas.edu/users/inderjit/public_papers/multilevel_pami.pdf) (PAMI 2007) +* **[Voxel Grid Pooling](#voxelgrid)** from, *e.g.*, Simonovsky and Komodakis: [Dynamic Edge-Conditioned Filters in Convolutional Neural Networks on Graphs](https://arxiv.org/abs/1704.02901) (CVPR 2017) +* **[Iterative Farthest Point Sampling](#farthestpointsampling)** from, *e.g.*, Qi *et al.*: [PointNet++: Deep Hierarchical Feature Learning on Point Sets in a Metric Space](https://arxiv.org/abs/1706.02413) (NIPS 2017) +* **[k-NN](#knn-graph)** and **[Radius](#radius-graph)** graph generation +* Clustering based on **[Nearest](#nearest)** points +* **[Random Walk Sampling](#randomwalk-sampling)** from, *e.g.*, Grover and Leskovec: [node2vec: Scalable Feature Learning for Networks](https://arxiv.org/abs/1607.00653) (KDD 2016) + +All included operations work on varying data types and are implemented for both CPU and GPU. + +## Installation + +### Anaconda + +**Update:** You can now install `pytorch-cluster` via [Anaconda](https://anaconda.org/pyg/pytorch-cluster) for all major OS/PyTorch/CUDA combinations 🤗 +Given that you have [`pytorch >= 1.8.0` installed](https://pytorch.org/get-started/locally/), simply run + +``` +conda install pytorch-cluster -c pyg +``` + +### Binaries + +We alternatively provide pip wheels for all major OS/PyTorch/CUDA combinations, see [here](https://data.pyg.org/whl). + +#### PyTorch 2.1 + +To install the binaries for PyTorch 2.1.0, simply run + +``` +pip install torch-cluster -f https://data.pyg.org/whl/torch-2.1.0+${CUDA}.html +``` + +where `${CUDA}` should be replaced by either `cpu`, `cu118`, or `cu121` depending on your PyTorch installation. + +| | `cpu` | `cu118` | `cu121` | +|-------------|-------|---------|---------| +| **Linux** | ✅ | ✅ | ✅ | +| **Windows** | ✅ | ✅ | ✅ | +| **macOS** | ✅ | | | + +#### PyTorch 2.0 + +To install the binaries for PyTorch 2.0.0, simply run + +``` +pip install torch-cluster -f https://data.pyg.org/whl/torch-2.0.0+${CUDA}.html +``` + +where `${CUDA}` should be replaced by either `cpu`, `cu117`, or `cu118` depending on your PyTorch installation. + +| | `cpu` | `cu117` | `cu118` | +|-------------|-------|---------|---------| +| **Linux** | ✅ | ✅ | ✅ | +| **Windows** | ✅ | ✅ | ✅ | +| **macOS** | ✅ | | | + +**Note:** Binaries of older versions are also provided for PyTorch 1.4.0, PyTorch 1.5.0, PyTorch 1.6.0, PyTorch 1.7.0/1.7.1, PyTorch 1.8.0/1.8.1, PyTorch 1.9.0, PyTorch 1.10.0/1.10.1/1.10.2, PyTorch 1.11.0, PyTorch 1.12.0/1.12.1 and PyTorch 1.13.0/1.13.1 (following the same procedure). +For older versions, you need to explicitly specify the latest supported version number or install via `pip install --no-index` in order to prevent a manual installation from source. +You can look up the latest supported version number [here](https://data.pyg.org/whl). + +### From source + +Ensure that at least PyTorch 1.4.0 is installed and verify that `cuda/bin` and `cuda/include` are in your `$PATH` and `$CPATH` respectively, *e.g.*: + +``` +$ python -c "import torch; print(torch.__version__)" +>>> 1.4.0 + +$ echo $PATH +>>> /usr/local/cuda/bin:... + +$ echo $CPATH +>>> /usr/local/cuda/include:...
+``` + +Then run: + +``` +pip install torch-cluster +``` + +When running in a Docker container without an NVIDIA driver, PyTorch cannot evaluate the compute capabilities and may fail. +In this case, ensure that the compute capabilities are set via `TORCH_CUDA_ARCH_LIST`, *e.g.*: + +``` +export TORCH_CUDA_ARCH_LIST="6.0 6.1 7.2+PTX 7.5+PTX" +``` + +## Functions + +### Graclus + +A greedy clustering algorithm that picks an unmarked vertex and matches it with one of its unmarked neighbors (the one that maximizes its edge weight). +The GPU algorithm is adapted from Fagginger Auer and Bisseling: [A GPU Algorithm for Greedy Graph Matching](http://www.staff.science.uu.nl/~bisse101/Articles/match12.pdf) (LNCS 2012). + +```python +import torch +from torch_cluster import graclus_cluster + +row = torch.tensor([0, 1, 1, 2]) +col = torch.tensor([1, 0, 2, 1]) +weight = torch.tensor([1., 1., 1., 1.]) # Optional edge weights. + +cluster = graclus_cluster(row, col, weight) +``` + +``` +print(cluster) +tensor([0, 0, 1]) +``` + +### VoxelGrid + +A clustering algorithm, which overlays a regular grid of user-defined size over a point cloud and clusters all points within a voxel. + +```python +import torch +from torch_cluster import grid_cluster + +pos = torch.tensor([[0., 0.], [11., 9.], [2., 8.], [2., 2.], [8., 3.]]) +size = torch.Tensor([5, 5]) + +cluster = grid_cluster(pos, size) +``` + +``` +print(cluster) +tensor([0, 5, 3, 0, 1]) +``` + +### FarthestPointSampling + +A sampling algorithm, which iteratively samples the most distant point with regard to the remaining points. + +```python +import torch +from torch_cluster import fps + +x = torch.tensor([[-1., -1.], [-1., 1.], [1., -1.], [1., 1.]]) +batch = torch.tensor([0, 0, 0, 0]) +index = fps(x, batch, ratio=0.5, random_start=False) +``` + +``` +print(index) +tensor([0, 3]) +``` + +### kNN-Graph + +Computes graph edges to the nearest *k* points. + +**Args:** + +* **x** *(Tensor)*: Node feature matrix of shape `[N, F]`. +* **k** *(int)*: The number of neighbors. +* **batch** *(LongTensor, optional)*: Batch vector of shape `[N]`, which assigns each node to a specific example. `batch` needs to be sorted. (default: `None`) +* **loop** *(bool, optional)*: If `True`, the graph will contain self-loops. (default: `False`) +* **flow** *(string, optional)*: The flow direction when used in combination with message passing (`"source_to_target"` or `"target_to_source"`). (default: `"source_to_target"`) +* **cosine** *(boolean, optional)*: If `True`, will use the Cosine distance instead of Euclidean distance to find nearest neighbors. (default: `False`) +* **num_workers** *(int)*: Number of workers to use for computation. Has no effect in case `batch` is not `None`, or the input lies on the GPU. (default: `1`) + +```python +import torch +from torch_cluster import knn_graph + +x = torch.tensor([[-1., -1.], [-1., 1.], [1., -1.], [1., 1.]]) +batch = torch.tensor([0, 0, 0, 0]) +edge_index = knn_graph(x, k=2, batch=batch, loop=False) +``` + +``` +print(edge_index) +tensor([[1, 2, 0, 3, 0, 3, 1, 2], + [0, 0, 1, 1, 2, 2, 3, 3]]) +``` + +### Radius-Graph + +Computes graph edges to all points within a given distance. + +**Args:** + +* **x** *(Tensor)*: Node feature matrix of shape `[N, F]`. +* **r** *(float)*: The radius. +* **batch** *(LongTensor, optional)*: Batch vector of shape `[N]`, which assigns each node to a specific example. `batch` needs to be sorted. (default: `None`) +* **loop** *(bool, optional)*: If `True`, the graph will contain self-loops.
(default: `False`) +* **max_num_neighbors** *(int, optional)*: The maximum number of neighbors to return for each element. If the number of actual neighbors is greater than `max_num_neighbors`, returned neighbors are picked randomly. (default: `32`) +* **flow** *(string, optional)*: The flow direction when used in combination with message passing (`"source_to_target"` or `"target_to_source"`). (default: `"source_to_target"`) +* **num_workers** *(int)*: Number of workers to use for computation. Has no effect in case `batch` is not `None`, or the input lies on the GPU. (default: `1`) + +```python +import torch +from torch_cluster import radius_graph + +x = torch.tensor([[-1., -1.], [-1., 1.], [1., -1.], [1., 1.]]) +batch = torch.tensor([0, 0, 0, 0]) +edge_index = radius_graph(x, r=2.5, batch=batch, loop=False) +``` + +``` +print(edge_index) +tensor([[1, 2, 0, 3, 0, 3, 1, 2], + [0, 0, 1, 1, 2, 2, 3, 3]]) +``` + +### Nearest + +Clusters points in *x* together that are nearest to a given query point in *y*. +`batch_{x,y}` vectors need to be sorted. + +```python +import torch +from torch_cluster import nearest + +x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]]) +batch_x = torch.tensor([0, 0, 0, 0]) +y = torch.Tensor([[-1, 0], [1, 0]]) +batch_y = torch.tensor([0, 0]) +cluster = nearest(x, y, batch_x, batch_y) +``` + +``` +print(cluster) +tensor([0, 0, 1, 1]) +``` + +### RandomWalk-Sampling + +Samples random walks of length `walk_length` from all node indices in `start` in the graph given by `(row, col)`. + +```python +import torch +from torch_cluster import random_walk + +row = torch.tensor([0, 1, 1, 1, 2, 2, 3, 3, 4, 4]) +col = torch.tensor([1, 0, 2, 3, 1, 4, 1, 4, 2, 3]) +start = torch.tensor([0, 1, 2, 3, 4]) + +walk = random_walk(row, col, start, walk_length=3) +``` + +``` +print(walk) +tensor([[0, 1, 2, 4], + [1, 3, 4, 2], + [2, 4, 2, 1], + [3, 4, 2, 4], + [4, 3, 1, 0]]) +``` + +## Running tests + +``` +pytest +``` + +## C++ API + +`torch-cluster` also offers a C++ API that contains C++ equivalents of the Python models. + +``` +export Torch_DIR=`python -c 'import torch;print(torch.utils.cmake_prefix_path)'` +mkdir build +cd build +# Add -DWITH_CUDA=on for CUDA support if needed +cmake ..
+make +make install +``` diff --git a/pytorch_cluster-1.6.1/cmake/TorchClusterConfig.cmake.in b/pytorch_cluster-1.6.3/cmake/TorchClusterConfig.cmake.in similarity index 100% rename from pytorch_cluster-1.6.1/cmake/TorchClusterConfig.cmake.in rename to pytorch_cluster-1.6.3/cmake/TorchClusterConfig.cmake.in diff --git a/pytorch_cluster-1.6.3/conda/pytorch-cluster/README.md b/pytorch_cluster-1.6.3/conda/pytorch-cluster/README.md new file mode 100644 index 0000000..6816209 --- /dev/null +++ b/pytorch_cluster-1.6.3/conda/pytorch-cluster/README.md @@ -0,0 +1,3 @@ +``` +./build_conda.sh 3.9 2.1.0 cu118 # python, pytorch and cuda version +``` diff --git a/pytorch_cluster-1.6.3/conda/pytorch-cluster/build_conda.sh b/pytorch_cluster-1.6.3/conda/pytorch-cluster/build_conda.sh new file mode 100644 index 0000000..17d55e3 --- /dev/null +++ b/pytorch_cluster-1.6.3/conda/pytorch-cluster/build_conda.sh @@ -0,0 +1,59 @@ +#!/bin/bash + +export PYTHON_VERSION=$1 +export TORCH_VERSION=$2 +export CUDA_VERSION=$3 + +export CONDA_PYTORCH_CONSTRAINT="pytorch==${TORCH_VERSION%.*}.*" + +if [ "${CUDA_VERSION}" = "cpu" ]; then + export CONDA_CUDATOOLKIT_CONSTRAINT="cpuonly # [not osx]" +else + case $CUDA_VERSION in + cu121) + export CONDA_CUDATOOLKIT_CONSTRAINT="pytorch-cuda==12.1.*" + ;; + cu118) + export CONDA_CUDATOOLKIT_CONSTRAINT="pytorch-cuda==11.8.*" + ;; + cu117) + export CONDA_CUDATOOLKIT_CONSTRAINT="pytorch-cuda==11.7.*" + ;; + cu116) + if [ "${TORCH_VERSION}" = "1.12.0" ]; then + export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit==11.6.*" + else + export CONDA_CUDATOOLKIT_CONSTRAINT="pytorch-cuda==11.6.*" + fi + ;; + cu115) + export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit==11.5.*" + ;; + cu113) + export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit==11.3.*" + ;; + cu111) + export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit==11.1.*" + ;; + cu102) + export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit==10.2.*" + ;; + cu101) + export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit==10.1.*" + ;; + *) + echo "Unrecognized CUDA_VERSION=$CUDA_VERSION" + exit 1 + ;; + esac +fi + +echo "PyTorch $TORCH_VERSION+$CUDA_VERSION" +echo "- $CONDA_PYTORCH_CONSTRAINT" +echo "- $CONDA_CUDATOOLKIT_CONSTRAINT" + +if [ "${TORCH_VERSION}" = "1.12.0" ] && [ "${CUDA_VERSION}" = "cu116" ]; then + conda build . -c pytorch -c default -c nvidia -c conda-forge --output-folder "$HOME/conda-bld" +else + conda build . -c pytorch -c default -c nvidia --output-folder "$HOME/conda-bld" +fi diff --git a/pytorch_cluster-1.6.3/conda/pytorch-cluster/meta.yaml b/pytorch_cluster-1.6.3/conda/pytorch-cluster/meta.yaml new file mode 100644 index 0000000..8e106b0 --- /dev/null +++ b/pytorch_cluster-1.6.3/conda/pytorch-cluster/meta.yaml @@ -0,0 +1,39 @@ +package: + name: pytorch-cluster + version: 1.6.3 + +source: + path: ../.. + +requirements: + build: + - {{ compiler('c') }} # [win] + + host: + - pip + - python {{ environ.get('PYTHON_VERSION') }} + - {{ environ.get('CONDA_PYTORCH_CONSTRAINT') }} + - {{ environ.get('CONDA_CUDATOOLKIT_CONSTRAINT') }} + + run: + - python {{ environ.get('PYTHON_VERSION') }} + - {{ environ.get('CONDA_PYTORCH_CONSTRAINT') }} + - {{ environ.get('CONDA_CUDATOOLKIT_CONSTRAINT') }} + +build: + string: py{{ environ.get('PYTHON_VERSION').replace('.', '') }}_torch_{{ environ['TORCH_VERSION'] }}_{{ environ['CUDA_VERSION'] }} + script: pip install . 
+ script_env: + - FORCE_CUDA + - TORCH_CUDA_ARCH_LIST + +test: + imports: + - torch_cluster + requires: + - scipy + +about: + home: https://github.com/rusty1s/pytorch_cluster + license: MIT + summary: PyTorch Extension Library of Optimized Graph Cluster Algorithms diff --git a/pytorch_cluster-1.6.1/csrc/cluster.h b/pytorch_cluster-1.6.3/csrc/cluster.h similarity index 100% rename from pytorch_cluster-1.6.1/csrc/cluster.h rename to pytorch_cluster-1.6.3/csrc/cluster.h diff --git a/pytorch_cluster-1.6.1/csrc/cpu/fps_cpu.cpp b/pytorch_cluster-1.6.3/csrc/cpu/fps_cpu.cpp similarity index 100% rename from pytorch_cluster-1.6.1/csrc/cpu/fps_cpu.cpp rename to pytorch_cluster-1.6.3/csrc/cpu/fps_cpu.cpp diff --git a/pytorch_cluster-1.6.1/csrc/cpu/fps_cpu.h b/pytorch_cluster-1.6.3/csrc/cpu/fps_cpu.h similarity index 100% rename from pytorch_cluster-1.6.1/csrc/cpu/fps_cpu.h rename to pytorch_cluster-1.6.3/csrc/cpu/fps_cpu.h diff --git a/pytorch_cluster-1.6.1/csrc/cpu/graclus_cpu.cpp b/pytorch_cluster-1.6.3/csrc/cpu/graclus_cpu.cpp similarity index 100% rename from pytorch_cluster-1.6.1/csrc/cpu/graclus_cpu.cpp rename to pytorch_cluster-1.6.3/csrc/cpu/graclus_cpu.cpp diff --git a/pytorch_cluster-1.6.1/csrc/cpu/graclus_cpu.h b/pytorch_cluster-1.6.3/csrc/cpu/graclus_cpu.h similarity index 100% rename from pytorch_cluster-1.6.1/csrc/cpu/graclus_cpu.h rename to pytorch_cluster-1.6.3/csrc/cpu/graclus_cpu.h diff --git a/pytorch_cluster-1.6.1/csrc/cpu/grid_cpu.cpp b/pytorch_cluster-1.6.3/csrc/cpu/grid_cpu.cpp similarity index 100% rename from pytorch_cluster-1.6.1/csrc/cpu/grid_cpu.cpp rename to pytorch_cluster-1.6.3/csrc/cpu/grid_cpu.cpp diff --git a/pytorch_cluster-1.6.1/csrc/cpu/grid_cpu.h b/pytorch_cluster-1.6.3/csrc/cpu/grid_cpu.h similarity index 100% rename from pytorch_cluster-1.6.1/csrc/cpu/grid_cpu.h rename to pytorch_cluster-1.6.3/csrc/cpu/grid_cpu.h diff --git a/pytorch_cluster-1.6.1/csrc/cpu/knn_cpu.cpp b/pytorch_cluster-1.6.3/csrc/cpu/knn_cpu.cpp similarity index 100% rename from pytorch_cluster-1.6.1/csrc/cpu/knn_cpu.cpp rename to pytorch_cluster-1.6.3/csrc/cpu/knn_cpu.cpp diff --git a/pytorch_cluster-1.6.1/csrc/cpu/knn_cpu.h b/pytorch_cluster-1.6.3/csrc/cpu/knn_cpu.h similarity index 100% rename from pytorch_cluster-1.6.1/csrc/cpu/knn_cpu.h rename to pytorch_cluster-1.6.3/csrc/cpu/knn_cpu.h diff --git a/pytorch_cluster-1.6.1/csrc/cpu/radius_cpu.cpp b/pytorch_cluster-1.6.3/csrc/cpu/radius_cpu.cpp similarity index 100% rename from pytorch_cluster-1.6.1/csrc/cpu/radius_cpu.cpp rename to pytorch_cluster-1.6.3/csrc/cpu/radius_cpu.cpp diff --git a/pytorch_cluster-1.6.1/csrc/cpu/radius_cpu.h b/pytorch_cluster-1.6.3/csrc/cpu/radius_cpu.h similarity index 100% rename from pytorch_cluster-1.6.1/csrc/cpu/radius_cpu.h rename to pytorch_cluster-1.6.3/csrc/cpu/radius_cpu.h diff --git a/pytorch_cluster-1.6.1/csrc/cpu/rw_cpu.cpp b/pytorch_cluster-1.6.3/csrc/cpu/rw_cpu.cpp similarity index 100% rename from pytorch_cluster-1.6.1/csrc/cpu/rw_cpu.cpp rename to pytorch_cluster-1.6.3/csrc/cpu/rw_cpu.cpp diff --git a/pytorch_cluster-1.6.1/csrc/cpu/rw_cpu.h b/pytorch_cluster-1.6.3/csrc/cpu/rw_cpu.h similarity index 100% rename from pytorch_cluster-1.6.1/csrc/cpu/rw_cpu.h rename to pytorch_cluster-1.6.3/csrc/cpu/rw_cpu.h diff --git a/pytorch_cluster-1.6.1/csrc/cpu/sampler_cpu.cpp b/pytorch_cluster-1.6.3/csrc/cpu/sampler_cpu.cpp similarity index 100% rename from pytorch_cluster-1.6.1/csrc/cpu/sampler_cpu.cpp rename to pytorch_cluster-1.6.3/csrc/cpu/sampler_cpu.cpp diff --git 
a/pytorch_cluster-1.6.1/csrc/cpu/sampler_cpu.h b/pytorch_cluster-1.6.3/csrc/cpu/sampler_cpu.h similarity index 100% rename from pytorch_cluster-1.6.1/csrc/cpu/sampler_cpu.h rename to pytorch_cluster-1.6.3/csrc/cpu/sampler_cpu.h diff --git a/pytorch_cluster-1.6.1/csrc/cpu/utils.h b/pytorch_cluster-1.6.3/csrc/cpu/utils.h similarity index 100% rename from pytorch_cluster-1.6.1/csrc/cpu/utils.h rename to pytorch_cluster-1.6.3/csrc/cpu/utils.h diff --git a/pytorch_cluster-1.6.1/csrc/cpu/utils/KDTreeVectorOfVectorsAdaptor.h b/pytorch_cluster-1.6.3/csrc/cpu/utils/KDTreeVectorOfVectorsAdaptor.h similarity index 100% rename from pytorch_cluster-1.6.1/csrc/cpu/utils/KDTreeVectorOfVectorsAdaptor.h rename to pytorch_cluster-1.6.3/csrc/cpu/utils/KDTreeVectorOfVectorsAdaptor.h diff --git a/pytorch_cluster-1.6.1/csrc/cpu/utils/nanoflann.hpp b/pytorch_cluster-1.6.3/csrc/cpu/utils/nanoflann.hpp similarity index 100% rename from pytorch_cluster-1.6.1/csrc/cpu/utils/nanoflann.hpp rename to pytorch_cluster-1.6.3/csrc/cpu/utils/nanoflann.hpp diff --git a/pytorch_cluster-1.6.1/csrc/cuda/fps_cuda.cu b/pytorch_cluster-1.6.3/csrc/cuda/fps_cuda.cu similarity index 100% rename from pytorch_cluster-1.6.1/csrc/cuda/fps_cuda.cu rename to pytorch_cluster-1.6.3/csrc/cuda/fps_cuda.cu diff --git a/pytorch_cluster-1.6.1/csrc/cuda/fps_cuda.h b/pytorch_cluster-1.6.3/csrc/cuda/fps_cuda.h similarity index 100% rename from pytorch_cluster-1.6.1/csrc/cuda/fps_cuda.h rename to pytorch_cluster-1.6.3/csrc/cuda/fps_cuda.h diff --git a/pytorch_cluster-1.6.1/csrc/cuda/graclus_cuda.cu b/pytorch_cluster-1.6.3/csrc/cuda/graclus_cuda.cu similarity index 100% rename from pytorch_cluster-1.6.1/csrc/cuda/graclus_cuda.cu rename to pytorch_cluster-1.6.3/csrc/cuda/graclus_cuda.cu diff --git a/pytorch_cluster-1.6.1/csrc/cuda/graclus_cuda.h b/pytorch_cluster-1.6.3/csrc/cuda/graclus_cuda.h similarity index 100% rename from pytorch_cluster-1.6.1/csrc/cuda/graclus_cuda.h rename to pytorch_cluster-1.6.3/csrc/cuda/graclus_cuda.h diff --git a/pytorch_cluster-1.6.1/csrc/cuda/grid_cuda.cu b/pytorch_cluster-1.6.3/csrc/cuda/grid_cuda.cu similarity index 100% rename from pytorch_cluster-1.6.1/csrc/cuda/grid_cuda.cu rename to pytorch_cluster-1.6.3/csrc/cuda/grid_cuda.cu diff --git a/pytorch_cluster-1.6.1/csrc/cuda/grid_cuda.h b/pytorch_cluster-1.6.3/csrc/cuda/grid_cuda.h similarity index 100% rename from pytorch_cluster-1.6.1/csrc/cuda/grid_cuda.h rename to pytorch_cluster-1.6.3/csrc/cuda/grid_cuda.h diff --git a/pytorch_cluster-1.6.1/csrc/cuda/knn_cuda.cu b/pytorch_cluster-1.6.3/csrc/cuda/knn_cuda.cu similarity index 100% rename from pytorch_cluster-1.6.1/csrc/cuda/knn_cuda.cu rename to pytorch_cluster-1.6.3/csrc/cuda/knn_cuda.cu diff --git a/pytorch_cluster-1.6.1/csrc/cuda/knn_cuda.h b/pytorch_cluster-1.6.3/csrc/cuda/knn_cuda.h similarity index 100% rename from pytorch_cluster-1.6.1/csrc/cuda/knn_cuda.h rename to pytorch_cluster-1.6.3/csrc/cuda/knn_cuda.h diff --git a/pytorch_cluster-1.6.1/csrc/cuda/nearest_cuda.cu b/pytorch_cluster-1.6.3/csrc/cuda/nearest_cuda.cu similarity index 100% rename from pytorch_cluster-1.6.1/csrc/cuda/nearest_cuda.cu rename to pytorch_cluster-1.6.3/csrc/cuda/nearest_cuda.cu diff --git a/pytorch_cluster-1.6.1/csrc/cuda/nearest_cuda.h b/pytorch_cluster-1.6.3/csrc/cuda/nearest_cuda.h similarity index 100% rename from pytorch_cluster-1.6.1/csrc/cuda/nearest_cuda.h rename to pytorch_cluster-1.6.3/csrc/cuda/nearest_cuda.h diff --git a/pytorch_cluster-1.6.1/csrc/cuda/radius_cuda.cu b/pytorch_cluster-1.6.3/csrc/cuda/radius_cuda.cu 
similarity index 100% rename from pytorch_cluster-1.6.1/csrc/cuda/radius_cuda.cu rename to pytorch_cluster-1.6.3/csrc/cuda/radius_cuda.cu diff --git a/pytorch_cluster-1.6.1/csrc/cuda/radius_cuda.h b/pytorch_cluster-1.6.3/csrc/cuda/radius_cuda.h similarity index 100% rename from pytorch_cluster-1.6.1/csrc/cuda/radius_cuda.h rename to pytorch_cluster-1.6.3/csrc/cuda/radius_cuda.h diff --git a/pytorch_cluster-1.6.1/csrc/cuda/rw_cuda.cu b/pytorch_cluster-1.6.3/csrc/cuda/rw_cuda.cu similarity index 100% rename from pytorch_cluster-1.6.1/csrc/cuda/rw_cuda.cu rename to pytorch_cluster-1.6.3/csrc/cuda/rw_cuda.cu diff --git a/pytorch_cluster-1.6.1/csrc/cuda/rw_cuda.h b/pytorch_cluster-1.6.3/csrc/cuda/rw_cuda.h similarity index 100% rename from pytorch_cluster-1.6.1/csrc/cuda/rw_cuda.h rename to pytorch_cluster-1.6.3/csrc/cuda/rw_cuda.h diff --git a/pytorch_cluster-1.6.1/csrc/cuda/utils.cuh b/pytorch_cluster-1.6.3/csrc/cuda/utils.cuh similarity index 100% rename from pytorch_cluster-1.6.1/csrc/cuda/utils.cuh rename to pytorch_cluster-1.6.3/csrc/cuda/utils.cuh diff --git a/pytorch_cluster-1.6.1/csrc/extensions.h b/pytorch_cluster-1.6.3/csrc/extensions.h similarity index 100% rename from pytorch_cluster-1.6.1/csrc/extensions.h rename to pytorch_cluster-1.6.3/csrc/extensions.h diff --git a/pytorch_cluster-1.6.1/csrc/fps.cpp b/pytorch_cluster-1.6.3/csrc/fps.cpp similarity index 100% rename from pytorch_cluster-1.6.1/csrc/fps.cpp rename to pytorch_cluster-1.6.3/csrc/fps.cpp diff --git a/pytorch_cluster-1.6.1/csrc/graclus.cpp b/pytorch_cluster-1.6.3/csrc/graclus.cpp similarity index 100% rename from pytorch_cluster-1.6.1/csrc/graclus.cpp rename to pytorch_cluster-1.6.3/csrc/graclus.cpp diff --git a/pytorch_cluster-1.6.1/csrc/grid.cpp b/pytorch_cluster-1.6.3/csrc/grid.cpp similarity index 100% rename from pytorch_cluster-1.6.1/csrc/grid.cpp rename to pytorch_cluster-1.6.3/csrc/grid.cpp diff --git a/pytorch_cluster-1.6.1/csrc/knn.cpp b/pytorch_cluster-1.6.3/csrc/knn.cpp similarity index 100% rename from pytorch_cluster-1.6.1/csrc/knn.cpp rename to pytorch_cluster-1.6.3/csrc/knn.cpp diff --git a/pytorch_cluster-1.6.1/csrc/macros.h b/pytorch_cluster-1.6.3/csrc/macros.h similarity index 100% rename from pytorch_cluster-1.6.1/csrc/macros.h rename to pytorch_cluster-1.6.3/csrc/macros.h diff --git a/pytorch_cluster-1.6.1/csrc/nearest.cpp b/pytorch_cluster-1.6.3/csrc/nearest.cpp similarity index 100% rename from pytorch_cluster-1.6.1/csrc/nearest.cpp rename to pytorch_cluster-1.6.3/csrc/nearest.cpp diff --git a/pytorch_cluster-1.6.1/csrc/radius.cpp b/pytorch_cluster-1.6.3/csrc/radius.cpp similarity index 100% rename from pytorch_cluster-1.6.1/csrc/radius.cpp rename to pytorch_cluster-1.6.3/csrc/radius.cpp diff --git a/pytorch_cluster-1.6.1/csrc/rw.cpp b/pytorch_cluster-1.6.3/csrc/rw.cpp similarity index 100% rename from pytorch_cluster-1.6.1/csrc/rw.cpp rename to pytorch_cluster-1.6.3/csrc/rw.cpp diff --git a/pytorch_cluster-1.6.1/csrc/sampler.cpp b/pytorch_cluster-1.6.3/csrc/sampler.cpp similarity index 100% rename from pytorch_cluster-1.6.1/csrc/sampler.cpp rename to pytorch_cluster-1.6.3/csrc/sampler.cpp diff --git a/pytorch_cluster-1.6.1/csrc/version.cpp b/pytorch_cluster-1.6.3/csrc/version.cpp similarity index 100% rename from pytorch_cluster-1.6.1/csrc/version.cpp rename to pytorch_cluster-1.6.3/csrc/version.cpp diff --git a/pytorch_cluster-1.6.3/setup.cfg b/pytorch_cluster-1.6.3/setup.cfg new file mode 100644 index 0000000..1f21bb7 --- /dev/null +++ b/pytorch_cluster-1.6.3/setup.cfg @@ -0,0 +1,19 @@ 
+[metadata] +long_description=file: README.md +long_description_content_type=text/markdown + +classifiers = + Development Status :: 5 - Production/Stable + License :: OSI Approved :: MIT License + Programming Language :: Python + Programming Language :: Python :: 3.8 + Programming Language :: Python :: 3.9 + Programming Language :: Python :: 3.10 + Programming Language :: Python :: 3.11 + Programming Language :: Python :: 3 :: Only + +[aliases] +test = pytest + +[tool:pytest] +addopts = --capture=no diff --git a/pytorch_cluster-1.6.3/setup.py b/pytorch_cluster-1.6.3/setup.py new file mode 100644 index 0000000..cd7de3c --- /dev/null +++ b/pytorch_cluster-1.6.3/setup.py @@ -0,0 +1,150 @@ +import glob +import os +import os.path as osp +import platform +import sys +from itertools import product + +import torch +from setuptools import find_packages, setup +from torch.__config__ import parallel_info +from torch.utils.cpp_extension import (CUDA_HOME, BuildExtension, CppExtension, + CUDAExtension) + +__version__ = '1.6.3' +URL = 'https://github.com/rusty1s/pytorch_cluster' + +WITH_CUDA = False +if torch.cuda.is_available(): + WITH_CUDA = CUDA_HOME is not None or torch.version.hip + +suffices = ['cpu', 'cuda'] if WITH_CUDA else ['cpu'] +if os.getenv('FORCE_CUDA', '0') == '1': + suffices = ['cuda', 'cpu'] +if os.getenv('FORCE_ONLY_CUDA', '0') == '1': + suffices = ['cuda'] +if os.getenv('FORCE_ONLY_CPU', '0') == '1': + suffices = ['cpu'] + +BUILD_DOCS = os.getenv('BUILD_DOCS', '0') == '1' + + +def get_extensions(): + extensions = [] + + extensions_dir = osp.join('csrc') + main_files = glob.glob(osp.join(extensions_dir, '*.cpp')) + # remove generated 'hip' files, in case of rebuilds + main_files = [path for path in main_files if 'hip' not in path] + + for main, suffix in product(main_files, suffices): + define_macros = [('WITH_PYTHON', None)] + undef_macros = [] + + if sys.platform == 'win32': + define_macros += [('torchcluster_EXPORTS', None)] + + extra_compile_args = {'cxx': ['-O2']} + if not os.name == 'nt': # Not on Windows: + extra_compile_args['cxx'] += ['-Wno-sign-compare'] + extra_link_args = ['-s'] + + info = parallel_info() + if ('backend: OpenMP' in info and 'OpenMP not found' not in info + and sys.platform != 'darwin'): + extra_compile_args['cxx'] += ['-DAT_PARALLEL_OPENMP'] + if sys.platform == 'win32': + extra_compile_args['cxx'] += ['/openmp'] + else: + extra_compile_args['cxx'] += ['-fopenmp'] + else: + print('Compiling without OpenMP...') + + # Compile for mac arm64 + if (sys.platform == 'darwin' and platform.machine() == 'arm64'): + extra_compile_args['cxx'] += ['-arch', 'arm64'] + extra_link_args += ['-arch', 'arm64'] + + if suffix == 'cuda': + define_macros += [('WITH_CUDA', None)] + nvcc_flags = os.getenv('NVCC_FLAGS', '') + nvcc_flags = [] if nvcc_flags == '' else nvcc_flags.split(' ') + nvcc_flags += ['-O2'] + extra_compile_args['nvcc'] = nvcc_flags + + if torch.version.hip: + # USE_ROCM was added to later versions of PyTorch + # Define here to support older PyTorch versions as well: + define_macros += [('USE_ROCM', None)] + undef_macros += ['__HIP_NO_HALF_CONVERSIONS__'] + else: + nvcc_flags += ['--expt-relaxed-constexpr'] + + name = main.split(os.sep)[-1][:-4] + sources = [main] + + path = osp.join(extensions_dir, 'cpu', f'{name}_cpu.cpp') + if osp.exists(path): + sources += [path] + + path = osp.join(extensions_dir, 'cuda', f'{name}_cuda.cu') + if suffix == 'cuda' and osp.exists(path): + sources += [path] + + Extension = CppExtension if suffix == 'cpu' else CUDAExtension + 
extension = Extension( + f'torch_cluster._{name}_{suffix}', + sources, + include_dirs=[extensions_dir], + define_macros=define_macros, + undef_macros=undef_macros, + extra_compile_args=extra_compile_args, + extra_link_args=extra_link_args, + ) + extensions += [extension] + + return extensions + + +install_requires = [ + 'scipy', +] + +test_requires = [ + 'pytest', + 'pytest-cov', +] + +# work-around hipify abs paths +include_package_data = True +if torch.cuda.is_available() and torch.version.hip: + include_package_data = False + +setup( + name='torch_cluster', + version=__version__, + description=('PyTorch Extension Library of Optimized Graph Cluster ' + 'Algorithms'), + author='Matthias Fey', + author_email='matthias.fey@tu-dortmund.de', + url=URL, + download_url=f'{URL}/archive/{__version__}.tar.gz', + keywords=[ + 'pytorch', + 'geometric-deep-learning', + 'graph-neural-networks', + 'cluster-algorithms', + ], + python_requires='>=3.8', + install_requires=install_requires, + extras_require={ + 'test': test_requires, + }, + ext_modules=get_extensions() if not BUILD_DOCS else [], + cmdclass={ + 'build_ext': + BuildExtension.with_options(no_python_abi_suffix=True, use_ninja=False) + }, + packages=find_packages(), + include_package_data=include_package_data, +) diff --git a/pytorch_cluster-1.6.1/test/test_fps.py b/pytorch_cluster-1.6.3/test/test_fps.py similarity index 79% rename from pytorch_cluster-1.6.1/test/test_fps.py rename to pytorch_cluster-1.6.3/test/test_fps.py index 52b689d..0f10e51 100644 --- a/pytorch_cluster-1.6.1/test/test_fps.py +++ b/pytorch_cluster-1.6.3/test/test_fps.py @@ -25,6 +25,8 @@ def test_fps(dtype, device): [+2, -2], ], dtype, device) batch = tensor([0, 0, 0, 0, 1, 1, 1, 1], torch.long, device) + ptr_list = [0, 4, 8] + ptr = torch.tensor(ptr_list, device=device) out = fps(x, batch, random_start=False) assert out.tolist() == [0, 2, 4, 6] @@ -32,12 +34,18 @@ def test_fps(dtype, device): out = fps(x, batch, ratio=0.5, random_start=False) assert out.tolist() == [0, 2, 4, 6] - out = fps(x, batch, ratio=torch.tensor(0.5, device=device), - random_start=False) + ratio = torch.tensor(0.5, device=device) + out = fps(x, batch, ratio=ratio, random_start=False) assert out.tolist() == [0, 2, 4, 6] - out = fps(x, batch, ratio=torch.tensor([0.5, 0.5], device=device), - random_start=False) + out = fps(x, ptr=ptr_list, ratio=0.5, random_start=False) + assert out.tolist() == [0, 2, 4, 6] + + out = fps(x, ptr=ptr, ratio=0.5, random_start=False) + assert out.tolist() == [0, 2, 4, 6] + + ratio = torch.tensor([0.5, 0.5], device=device) + out = fps(x, batch, ratio=ratio, random_start=False) assert out.tolist() == [0, 2, 4, 6] out = fps(x, random_start=False) diff --git a/pytorch_cluster-1.6.1/test/test_graclus.py b/pytorch_cluster-1.6.3/test/test_graclus.py similarity index 93% rename from pytorch_cluster-1.6.1/test/test_graclus.py rename to pytorch_cluster-1.6.3/test/test_graclus.py index b892330..c8e1f39 100644 --- a/pytorch_cluster-1.6.1/test/test_graclus.py +++ b/pytorch_cluster-1.6.3/test/test_graclus.py @@ -50,3 +50,7 @@ def test_graclus_cluster(test, dtype, device): cluster = graclus_cluster(row, col, weight) assert_correct(row, col, cluster) + + jit = torch.jit.script(graclus_cluster) + cluster = jit(row, col, weight) + assert_correct(row, col, cluster) diff --git a/pytorch_cluster-1.6.1/test/test_grid.py b/pytorch_cluster-1.6.3/test/test_grid.py similarity index 91% rename from pytorch_cluster-1.6.1/test/test_grid.py rename to pytorch_cluster-1.6.3/test/test_grid.py index 
c297f33..2d53220 100644 --- a/pytorch_cluster-1.6.1/test/test_grid.py +++ b/pytorch_cluster-1.6.3/test/test_grid.py @@ -38,3 +38,6 @@ def test_grid_cluster(test, dtype, device): cluster = grid_cluster(pos, size, start, end) assert cluster.tolist() == test['cluster'] + + jit = torch.jit.script(grid_cluster) + assert torch.equal(jit(pos, size, start, end), cluster) diff --git a/pytorch_cluster-1.6.3/test/test_knn.py b/pytorch_cluster-1.6.3/test/test_knn.py new file mode 100644 index 0000000..32852fe --- /dev/null +++ b/pytorch_cluster-1.6.3/test/test_knn.py @@ -0,0 +1,88 @@ +from itertools import product + +import pytest +import scipy.spatial +import torch +from torch_cluster import knn, knn_graph +from torch_cluster.testing import devices, grad_dtypes, tensor + + +def to_set(edge_index): + return set([(i, j) for i, j in edge_index.t().tolist()]) + + +@pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices)) +def test_knn(dtype, device): + x = tensor([ + [-1, -1], + [-1, +1], + [+1, +1], + [+1, -1], + [-1, -1], + [-1, +1], + [+1, +1], + [+1, -1], + ], dtype, device) + y = tensor([ + [1, 0], + [-1, 0], + ], dtype, device) + + batch_x = tensor([0, 0, 0, 0, 1, 1, 1, 1], torch.long, device) + batch_y = tensor([0, 1], torch.long, device) + + edge_index = knn(x, y, 2) + assert to_set(edge_index) == set([(0, 2), (0, 3), (1, 0), (1, 1)]) + + jit = torch.jit.script(knn) + edge_index = jit(x, y, 2) + assert to_set(edge_index) == set([(0, 2), (0, 3), (1, 0), (1, 1)]) + + edge_index = knn(x, y, 2, batch_x, batch_y) + assert to_set(edge_index) == set([(0, 2), (0, 3), (1, 4), (1, 5)]) + + if x.is_cuda: + edge_index = knn(x, y, 2, batch_x, batch_y, cosine=True) + assert to_set(edge_index) == set([(0, 2), (0, 3), (1, 4), (1, 5)]) + + # Skipping a batch + batch_x = tensor([0, 0, 0, 0, 2, 2, 2, 2], torch.long, device) + batch_y = tensor([0, 2], torch.long, device) + edge_index = knn(x, y, 2, batch_x, batch_y) + assert to_set(edge_index) == set([(0, 2), (0, 3), (1, 4), (1, 5)]) + + +@pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices)) +def test_knn_graph(dtype, device): + x = tensor([ + [-1, -1], + [-1, +1], + [+1, +1], + [+1, -1], + ], dtype, device) + + edge_index = knn_graph(x, k=2, flow='target_to_source') + assert to_set(edge_index) == set([(0, 1), (0, 3), (1, 0), (1, 2), (2, 1), + (2, 3), (3, 0), (3, 2)]) + + edge_index = knn_graph(x, k=2, flow='source_to_target') + assert to_set(edge_index) == set([(1, 0), (3, 0), (0, 1), (2, 1), (1, 2), + (3, 2), (0, 3), (2, 3)]) + + jit = torch.jit.script(knn_graph) + edge_index = jit(x, k=2, flow='source_to_target') + assert to_set(edge_index) == set([(1, 0), (3, 0), (0, 1), (2, 1), (1, 2), + (3, 2), (0, 3), (2, 3)]) + + +@pytest.mark.parametrize('dtype,device', product([torch.float], devices)) +def test_knn_graph_large(dtype, device): + x = torch.randn(1000, 3, dtype=dtype, device=device) + + edge_index = knn_graph(x, k=5, flow='target_to_source', loop=True) + + tree = scipy.spatial.cKDTree(x.cpu().numpy()) + _, col = tree.query(x.cpu(), k=5) + truth = set([(i, j) for i, ns in enumerate(col) for j in ns]) + + assert to_set(edge_index.cpu()) == truth diff --git a/pytorch_cluster-1.6.1/test/test_nearest.py b/pytorch_cluster-1.6.3/test/test_nearest.py similarity index 100% rename from pytorch_cluster-1.6.1/test/test_nearest.py rename to pytorch_cluster-1.6.3/test/test_nearest.py diff --git a/pytorch_cluster-1.6.1/test/test_radius.py b/pytorch_cluster-1.6.3/test/test_radius.py similarity index 80% rename from 
pytorch_cluster-1.6.1/test/test_radius.py rename to pytorch_cluster-1.6.3/test/test_radius.py index 34c4ad9..078412f 100644 --- a/pytorch_cluster-1.6.1/test/test_radius.py +++ b/pytorch_cluster-1.6.3/test/test_radius.py @@ -35,6 +35,11 @@ def test_radius(dtype, device): assert to_set(edge_index) == set([(0, 0), (0, 1), (0, 2), (0, 3), (1, 1), (1, 2), (1, 5), (1, 6)]) + jit = torch.jit.script(radius) + edge_index = jit(x, y, 2, max_num_neighbors=4) + assert to_set(edge_index) == set([(0, 0), (0, 1), (0, 2), (0, 3), (1, 1), + (1, 2), (1, 5), (1, 6)]) + edge_index = radius(x, y, 2, batch_x, batch_y, max_num_neighbors=4) assert to_set(edge_index) == set([(0, 0), (0, 1), (0, 2), (0, 3), (1, 5), (1, 6)]) @@ -64,12 +69,20 @@ def test_radius_graph(dtype, device): assert to_set(edge_index) == set([(1, 0), (3, 0), (0, 1), (2, 1), (1, 2), (3, 2), (0, 3), (2, 3)]) + jit = torch.jit.script(radius_graph) + edge_index = jit(x, r=2.5, flow='source_to_target') + assert to_set(edge_index) == set([(1, 0), (3, 0), (0, 1), (2, 1), (1, 2), + (3, 2), (0, 3), (2, 3)]) + @pytest.mark.parametrize('dtype,device', product([torch.float], devices)) def test_radius_graph_large(dtype, device): x = torch.randn(1000, 3, dtype=dtype, device=device) - edge_index = radius_graph(x, r=0.5, flow='target_to_source', loop=True, + edge_index = radius_graph(x, + r=0.5, + flow='target_to_source', + loop=True, max_num_neighbors=2000) tree = scipy.spatial.cKDTree(x.cpu().numpy()) diff --git a/pytorch_cluster-1.6.1/test/test_rw.py b/pytorch_cluster-1.6.3/test/test_rw.py similarity index 95% rename from pytorch_cluster-1.6.1/test/test_rw.py rename to pytorch_cluster-1.6.3/test/test_rw.py index 67d935d..82a8b77 100644 --- a/pytorch_cluster-1.6.1/test/test_rw.py +++ b/pytorch_cluster-1.6.3/test/test_rw.py @@ -31,6 +31,9 @@ def test_rw_small(device): out = random_walk(row, col, start, walk_length, num_nodes=3) assert out.tolist() == [[0, 1, 0, 1, 0], [1, 0, 1, 0, 1], [2, 2, 2, 2, 2]] + jit = torch.jit.script(random_walk) + assert torch.equal(jit(row, col, start, walk_length, num_nodes=3), out) + @pytest.mark.parametrize('device', devices) def test_rw_large_with_edge_indices(device): diff --git a/pytorch_cluster-1.6.1/test/test_sampler.py b/pytorch_cluster-1.6.3/test/test_sampler.py similarity index 100% rename from pytorch_cluster-1.6.1/test/test_sampler.py rename to pytorch_cluster-1.6.3/test/test_sampler.py diff --git a/pytorch_cluster-1.6.3/torch_cluster/__init__.py b/pytorch_cluster-1.6.3/torch_cluster/__init__.py new file mode 100644 index 0000000..39d1d67 --- /dev/null +++ b/pytorch_cluster-1.6.3/torch_cluster/__init__.py @@ -0,0 +1,60 @@ +import importlib +import os.path as osp + +import torch + +__version__ = '1.6.3' + +for library in [ + '_version', '_grid', '_graclus', '_fps', '_rw', '_sampler', '_nearest', + '_knn', '_radius' +]: + cuda_spec = importlib.machinery.PathFinder().find_spec( + f'{library}_cuda', [osp.dirname(__file__)]) + cpu_spec = importlib.machinery.PathFinder().find_spec( + f'{library}_cpu', [osp.dirname(__file__)]) + spec = cuda_spec or cpu_spec + if spec is not None: + torch.ops.load_library(spec.origin) + else: # pragma: no cover + raise ImportError(f"Could not find module '{library}_cpu' in " + f"{osp.dirname(__file__)}") + +cuda_version = torch.ops.torch_cluster.cuda_version() +if torch.version.cuda is not None and cuda_version != -1: # pragma: no cover + if cuda_version < 10000: + major, minor = int(str(cuda_version)[0]), int(str(cuda_version)[2]) + else: + major, minor = int(str(cuda_version)[0:2]), 
int(str(cuda_version)[3]) + t_major, t_minor = [int(x) for x in torch.version.cuda.split('.')] + + if t_major != major: + raise RuntimeError( + f'Detected that PyTorch and torch_cluster were compiled with ' + f'different CUDA versions. PyTorch has CUDA version ' + f'{t_major}.{t_minor} and torch_cluster has CUDA version ' + f'{major}.{minor}. Please reinstall the torch_cluster that ' + f'matches your PyTorch install.') + +from .fps import fps # noqa +from .graclus import graclus_cluster # noqa +from .grid import grid_cluster # noqa +from .knn import knn, knn_graph # noqa +from .nearest import nearest # noqa +from .radius import radius, radius_graph # noqa +from .rw import random_walk # noqa +from .sampler import neighbor_sampler # noqa + +__all__ = [ + 'graclus_cluster', + 'grid_cluster', + 'fps', + 'nearest', + 'knn', + 'knn_graph', + 'radius', + 'radius_graph', + 'random_walk', + 'neighbor_sampler', + '__version__', +] diff --git a/pytorch_cluster-1.6.3/torch_cluster/fps.py b/pytorch_cluster-1.6.3/torch_cluster/fps.py new file mode 100644 index 0000000..7baf981 --- /dev/null +++ b/pytorch_cluster-1.6.3/torch_cluster/fps.py @@ -0,0 +1,107 @@ +from typing import List, Optional, Union + +import torch +from torch import Tensor + +import torch_cluster.typing + + +@torch.jit._overload # noqa +def fps(src, batch, ratio, random_start, batch_size, ptr): # noqa + # type: (Tensor, Optional[Tensor], Optional[float], bool, Optional[int], Optional[Tensor]) -> Tensor # noqa + pass # pragma: no cover + + +@torch.jit._overload # noqa +def fps(src, batch, ratio, random_start, batch_size, ptr): # noqa + # type: (Tensor, Optional[Tensor], Optional[Tensor], bool, Optional[int], Optional[Tensor]) -> Tensor # noqa + pass # pragma: no cover + + +@torch.jit._overload # noqa +def fps(src, batch, ratio, random_start, batch_size, ptr): # noqa + # type: (Tensor, Optional[Tensor], Optional[float], bool, Optional[int], Optional[List[int]]) -> Tensor # noqa + pass # pragma: no cover + + +@torch.jit._overload # noqa +def fps(src, batch, ratio, random_start, batch_size, ptr): # noqa + # type: (Tensor, Optional[Tensor], Optional[Tensor], bool, Optional[int], Optional[List[int]]) -> Tensor # noqa + pass # pragma: no cover + + +def fps( # noqa + src: torch.Tensor, + batch: Optional[Tensor] = None, + ratio: Optional[Union[Tensor, float]] = None, + random_start: bool = True, + batch_size: Optional[int] = None, + ptr: Optional[Union[Tensor, List[int]]] = None, +): + r"""A sampling algorithm from the `"PointNet++: Deep Hierarchical Feature + Learning on Point Sets in a Metric Space" + <https://arxiv.org/abs/1706.02413>`_ paper, which iteratively samples the + most distant point with regard to the remaining points. + + Args: + src (Tensor): Point feature matrix + :math:`\mathbf{X} \in \mathbb{R}^{N \times F}`. + batch (LongTensor, optional): Batch vector + :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each + node to a specific example. (default: :obj:`None`) + ratio (float or Tensor, optional): Sampling ratio. + (default: :obj:`0.5`) + random_start (bool, optional): If set to :obj:`False`, use the first + node in :math:`\mathbf{X}` as starting node. (default: :obj:`True`) + batch_size (int, optional): The number of examples :math:`B`. + Automatically calculated if not given. (default: :obj:`None`) + ptr (torch.Tensor or [int], optional): If given, batch assignment will + be determined based on boundaries in CSR representation, *e.g.*, + :obj:`batch=[0,0,1,1,1,2]` translates to :obj:`ptr=[0,2,5,6]`.
+ (default: :obj:`None`) + + :rtype: :class:`LongTensor` + + .. code-block:: python + + import torch + from torch_cluster import fps + + src = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]]) + batch = torch.tensor([0, 0, 0, 0]) + index = fps(src, batch, ratio=0.5) + """ + r: Optional[Tensor] = None + if ratio is None: + r = torch.tensor(0.5, dtype=src.dtype, device=src.device) + elif isinstance(ratio, float): + r = torch.tensor(ratio, dtype=src.dtype, device=src.device) + else: + r = ratio + assert r is not None + + if ptr is not None: + if isinstance(ptr, list) and torch_cluster.typing.WITH_PTR_LIST: + return torch.ops.torch_cluster.fps_ptr_list( + src, ptr, r, random_start) + + if isinstance(ptr, list): + return torch.ops.torch_cluster.fps( + src, torch.tensor(ptr, device=src.device), r, random_start) + else: + return torch.ops.torch_cluster.fps(src, ptr, r, random_start) + + if batch is not None: + assert src.size(0) == batch.numel() + if batch_size is None: + batch_size = int(batch.max()) + 1 + + deg = src.new_zeros(batch_size, dtype=torch.long) + deg.scatter_add_(0, batch, torch.ones_like(batch)) + + ptr_vec = deg.new_zeros(batch_size + 1) + torch.cumsum(deg, 0, out=ptr_vec[1:]) + else: + ptr_vec = torch.tensor([0, src.size(0)], device=src.device) + + return torch.ops.torch_cluster.fps(src, ptr_vec, r, random_start) diff --git a/pytorch_cluster-1.6.1/torch_cluster/graclus.py b/pytorch_cluster-1.6.3/torch_cluster/graclus.py similarity index 88% rename from pytorch_cluster-1.6.1/torch_cluster/graclus.py rename to pytorch_cluster-1.6.3/torch_cluster/graclus.py index cdac892..7fa834d 100644 --- a/pytorch_cluster-1.6.1/torch_cluster/graclus.py +++ b/pytorch_cluster-1.6.3/torch_cluster/graclus.py @@ -3,10 +3,12 @@ import torch -@torch.jit.script -def graclus_cluster(row: torch.Tensor, col: torch.Tensor, - weight: Optional[torch.Tensor] = None, - num_nodes: Optional[int] = None) -> torch.Tensor: +def graclus_cluster( + row: torch.Tensor, + col: torch.Tensor, + weight: Optional[torch.Tensor] = None, + num_nodes: Optional[int] = None, +) -> torch.Tensor: """A greedy clustering algorithm of picking an unmarked vertex and matching it with one of its unmarked neighbors (that maximizes its edge weight). diff --git a/pytorch_cluster-1.6.1/torch_cluster/grid.py b/pytorch_cluster-1.6.3/torch_cluster/grid.py similarity index 81% rename from pytorch_cluster-1.6.1/torch_cluster/grid.py rename to pytorch_cluster-1.6.3/torch_cluster/grid.py index 1dbacb9..da59d51 100644 --- a/pytorch_cluster-1.6.1/torch_cluster/grid.py +++ b/pytorch_cluster-1.6.3/torch_cluster/grid.py @@ -3,10 +3,12 @@ import torch -@torch.jit.script -def grid_cluster(pos: torch.Tensor, size: torch.Tensor, - start: Optional[torch.Tensor] = None, - end: Optional[torch.Tensor] = None) -> torch.Tensor: +def grid_cluster( + pos: torch.Tensor, + size: torch.Tensor, + start: Optional[torch.Tensor] = None, + end: Optional[torch.Tensor] = None, +) -> torch.Tensor: """A clustering algorithm, which overlays a regular grid of user-defined size over a point cloud and clusters all points within a voxel.
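The `ptr` argument introduced to `fps` above replaces a per-node `batch` vector with CSR-style example boundaries, as exercised by the new `ptr` test in `test_fps.py`. A minimal sketch of the call, assuming `torch` and this `torch-cluster` build are installed; the point coordinates are illustrative, and the sampled indices shown hold for this particular geometry with `random_start=False`:

```python
import torch
from torch_cluster import fps

# Two examples of four points each, stacked into one [8, 2] tensor.
src = torch.tensor([[-1., -1.], [-1., +1.], [+1., -1.], [+1., +1.],
                    [-2., -2.], [-2., +2.], [+2., -2.], [+2., +2.]])

# CSR-style boundaries: rows 0-3 form example 0, rows 4-7 form example 1.
# Equivalent to the batch vector [0, 0, 0, 0, 1, 1, 1, 1].
ptr = torch.tensor([0, 4, 8])

# Sample half of the points per example, starting at each example's first row.
index = fps(src, ptr=ptr, ratio=0.5, random_start=False)
print(index)  # tensor([0, 3, 4, 7]): each start row plus the point farthest from it
```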
diff --git a/pytorch_cluster-1.6.3/torch_cluster/knn.py b/pytorch_cluster-1.6.3/torch_cluster/knn.py new file mode 100644 index 0000000..cf8f087 --- /dev/null +++ b/pytorch_cluster-1.6.3/torch_cluster/knn.py @@ -0,0 +1,144 @@ +from typing import Optional + +import torch + + +def knn( + x: torch.Tensor, + y: torch.Tensor, + k: int, + batch_x: Optional[torch.Tensor] = None, + batch_y: Optional[torch.Tensor] = None, + cosine: bool = False, + num_workers: int = 1, + batch_size: Optional[int] = None, +) -> torch.Tensor: + r"""Finds for each element in :obj:`y` the :obj:`k` nearest points in + :obj:`x`. + + Args: + x (Tensor): Node feature matrix + :math:`\mathbf{X} \in \mathbb{R}^{N \times F}`. + y (Tensor): Node feature matrix + :math:`\mathbf{X} \in \mathbb{R}^{M \times F}`. + k (int): The number of neighbors. + batch_x (LongTensor, optional): Batch vector + :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each + node to a specific example. :obj:`batch_x` needs to be sorted. + (default: :obj:`None`) + batch_y (LongTensor, optional): Batch vector + :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^M`, which assigns each + node to a specific example. :obj:`batch_y` needs to be sorted. + (default: :obj:`None`) + cosine (boolean, optional): If :obj:`True`, will use the Cosine + distance instead of the Euclidean distance to find nearest + neighbors. (default: :obj:`False`) + num_workers (int): Number of workers to use for computation. Has no + effect in case :obj:`batch_x` or :obj:`batch_y` is not + :obj:`None`, or the input lies on the GPU. (default: :obj:`1`) + batch_size (int, optional): The number of examples :math:`B`. + Automatically calculated if not given. (default: :obj:`None`) + + :rtype: :class:`LongTensor` + + .. code-block:: python + + import torch + from torch_cluster import knn + + x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]]) + batch_x = torch.tensor([0, 0, 0, 0]) + y = torch.Tensor([[-1, 0], [1, 0]]) + batch_y = torch.tensor([0, 0]) + assign_index = knn(x, y, 2, batch_x, batch_y) + """ + if x.numel() == 0 or y.numel() == 0: + return torch.empty(2, 0, dtype=torch.long, device=x.device) + + x = x.view(-1, 1) if x.dim() == 1 else x + y = y.view(-1, 1) if y.dim() == 1 else y + x, y = x.contiguous(), y.contiguous() + + if batch_size is None: + batch_size = 1 + if batch_x is not None: + assert x.size(0) == batch_x.numel() + batch_size = int(batch_x.max()) + 1 + if batch_y is not None: + assert y.size(0) == batch_y.numel() + batch_size = max(batch_size, int(batch_y.max()) + 1) + assert batch_size > 0 + + ptr_x: Optional[torch.Tensor] = None + ptr_y: Optional[torch.Tensor] = None + if batch_size > 1: + assert batch_x is not None + assert batch_y is not None + arange = torch.arange(batch_size + 1, device=x.device) + ptr_x = torch.bucketize(arange, batch_x) + ptr_y = torch.bucketize(arange, batch_y) + + return torch.ops.torch_cluster.knn(x, y, ptr_x, ptr_y, k, cosine, + num_workers) + + +def knn_graph( + x: torch.Tensor, + k: int, + batch: Optional[torch.Tensor] = None, + loop: bool = False, + flow: str = 'source_to_target', + cosine: bool = False, + num_workers: int = 1, + batch_size: Optional[int] = None, +) -> torch.Tensor: + r"""Computes graph edges to the nearest :obj:`k` points. + + Args: + x (Tensor): Node feature matrix + :math:`\mathbf{X} \in \mathbb{R}^{N \times F}`. + k (int): The number of neighbors. + batch (LongTensor, optional): Batch vector + :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each + node to a specific example. 
:obj:`batch` needs to be sorted. + (default: :obj:`None`) + loop (bool, optional): If :obj:`True`, the graph will contain + self-loops. (default: :obj:`False`) + flow (string, optional): The flow direction when used in combination + with message passing (:obj:`"source_to_target"` or + :obj:`"target_to_source"`). (default: :obj:`"source_to_target"`) + cosine (boolean, optional): If :obj:`True`, will use the Cosine + distance instead of Euclidean distance to find nearest neighbors. + (default: :obj:`False`) + num_workers (int): Number of workers to use for computation. Has no + effect in case :obj:`batch` is not :obj:`None`, or the input lies + on the GPU. (default: :obj:`1`) + batch_size (int, optional): The number of examples :math:`B`. + Automatically calculated if not given. (default: :obj:`None`) + + :rtype: :class:`LongTensor` + + .. code-block:: python + + import torch + from torch_cluster import knn_graph + + x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]]) + batch = torch.tensor([0, 0, 0, 0]) + edge_index = knn_graph(x, k=2, batch=batch, loop=False) + """ + + assert flow in ['source_to_target', 'target_to_source'] + edge_index = knn(x, x, k if loop else k + 1, batch, batch, cosine, + num_workers, batch_size) + + if flow == 'source_to_target': + row, col = edge_index[1], edge_index[0] + else: + row, col = edge_index[0], edge_index[1] + + if not loop: + mask = row != col + row, col = row[mask], col[mask] + + return torch.stack([row, col], dim=0) diff --git a/pytorch_cluster-1.6.1/torch_cluster/nearest.py b/pytorch_cluster-1.6.3/torch_cluster/nearest.py similarity index 100% rename from pytorch_cluster-1.6.1/torch_cluster/nearest.py rename to pytorch_cluster-1.6.3/torch_cluster/nearest.py diff --git a/pytorch_cluster-1.6.1/torch_cluster/radius.py b/pytorch_cluster-1.6.3/torch_cluster/radius.py similarity index 78% rename from pytorch_cluster-1.6.1/torch_cluster/radius.py rename to pytorch_cluster-1.6.3/torch_cluster/radius.py index fd73b75..069824a 100644 --- a/pytorch_cluster-1.6.1/torch_cluster/radius.py +++ b/pytorch_cluster-1.6.3/torch_cluster/radius.py @@ -3,11 +3,16 @@ import torch -@torch.jit.script -def radius(x: torch.Tensor, y: torch.Tensor, r: float, - batch_x: Optional[torch.Tensor] = None, - batch_y: Optional[torch.Tensor] = None, max_num_neighbors: int = 32, - num_workers: int = 1) -> torch.Tensor: +def radius( + x: torch.Tensor, + y: torch.Tensor, + r: float, + batch_x: Optional[torch.Tensor] = None, + batch_y: Optional[torch.Tensor] = None, + max_num_neighbors: int = 32, + num_workers: int = 1, + batch_size: Optional[int] = None, +) -> torch.Tensor: r"""Finds for each element in :obj:`y` all points in :obj:`x` within distance :obj:`r`. @@ -33,6 +38,8 @@ def radius(x: torch.Tensor, y: torch.Tensor, r: float, num_workers (int): Number of workers to use for computation. Has no effect in case :obj:`batch_x` or :obj:`batch_y` is not :obj:`None`, or the input lies on the GPU. (default: :obj:`1`) + batch_size (int, optional): The number of examples :math:`B`. + Automatically calculated if not given. (default: :obj:`None`) .. 
code-block:: python @@ -52,16 +59,19 @@ def radius(x: torch.Tensor, y: torch.Tensor, r: float, y = y.view(-1, 1) if y.dim() == 1 else y x, y = x.contiguous(), y.contiguous() - batch_size = 1 - if batch_x is not None: - assert x.size(0) == batch_x.numel() - batch_size = int(batch_x.max()) + 1 - if batch_y is not None: - assert y.size(0) == batch_y.numel() - batch_size = max(batch_size, int(batch_y.max()) + 1) + if batch_size is None: + batch_size = 1 + if batch_x is not None: + assert x.size(0) == batch_x.numel() + batch_size = int(batch_x.max()) + 1 + if batch_y is not None: + assert y.size(0) == batch_y.numel() + batch_size = max(batch_size, int(batch_y.max()) + 1) + assert batch_size > 0 ptr_x: Optional[torch.Tensor] = None ptr_y: Optional[torch.Tensor] = None + if batch_size > 1: assert batch_x is not None assert batch_y is not None @@ -73,11 +83,16 @@ def radius(x: torch.Tensor, y: torch.Tensor, r: float, max_num_neighbors, num_workers) -@torch.jit.script -def radius_graph(x: torch.Tensor, r: float, - batch: Optional[torch.Tensor] = None, loop: bool = False, - max_num_neighbors: int = 32, flow: str = 'source_to_target', - num_workers: int = 1) -> torch.Tensor: +def radius_graph( + x: torch.Tensor, + r: float, + batch: Optional[torch.Tensor] = None, + loop: bool = False, + max_num_neighbors: int = 32, + flow: str = 'source_to_target', + num_workers: int = 1, + batch_size: Optional[int] = None, +) -> torch.Tensor: r"""Computes graph edges to all points within a given distance. Args: @@ -101,6 +116,8 @@ def radius_graph(x: torch.Tensor, r: float, num_workers (int): Number of workers to use for computation. Has no effect in case :obj:`batch` is not :obj:`None`, or the input lies on the GPU. (default: :obj:`1`) + batch_size (int, optional): The number of examples :math:`B`. + Automatically calculated if not given. 
(default: :obj:`None`) :rtype: :class:`LongTensor` @@ -117,7 +134,7 @@ def radius_graph(x: torch.Tensor, r: float, assert flow in ['source_to_target', 'target_to_source'] edge_index = radius(x, x, r, batch, batch, max_num_neighbors if loop else max_num_neighbors + 1, - num_workers) + num_workers, batch_size) if flow == 'source_to_target': row, col = edge_index[1], edge_index[0] else: diff --git a/pytorch_cluster-1.6.1/torch_cluster/rw.py b/pytorch_cluster-1.6.3/torch_cluster/rw.py similarity index 96% rename from pytorch_cluster-1.6.1/torch_cluster/rw.py rename to pytorch_cluster-1.6.3/torch_cluster/rw.py index 12e0683..ecd07e9 100644 --- a/pytorch_cluster-1.6.1/torch_cluster/rw.py +++ b/pytorch_cluster-1.6.3/torch_cluster/rw.py @@ -4,7 +4,6 @@ from torch import Tensor -@torch.jit.script def random_walk( row: Tensor, col: Tensor, @@ -55,8 +54,7 @@ def random_walk( torch.cumsum(deg, 0, out=rowptr[1:]) node_seq, edge_seq = torch.ops.torch_cluster.random_walk( - rowptr, col, start, walk_length, p, q, - ) + rowptr, col, start, walk_length, p, q) if return_edge_indices: return node_seq, edge_seq diff --git a/pytorch_cluster-1.6.1/torch_cluster/sampler.py b/pytorch_cluster-1.6.3/torch_cluster/sampler.py similarity index 95% rename from pytorch_cluster-1.6.1/torch_cluster/sampler.py rename to pytorch_cluster-1.6.3/torch_cluster/sampler.py index 9d2e08e..1b68de0 100644 --- a/pytorch_cluster-1.6.1/torch_cluster/sampler.py +++ b/pytorch_cluster-1.6.3/torch_cluster/sampler.py @@ -1,7 +1,6 @@ import torch -@torch.jit.script def neighbor_sampler(start: torch.Tensor, rowptr: torch.Tensor, size: float): assert not start.is_cuda diff --git a/pytorch_cluster-1.6.1/torch_cluster/testing.py b/pytorch_cluster-1.6.3/torch_cluster/testing.py similarity index 100% rename from pytorch_cluster-1.6.1/torch_cluster/testing.py rename to pytorch_cluster-1.6.3/torch_cluster/testing.py diff --git a/pytorch_cluster-1.6.3/torch_cluster/typing.py b/pytorch_cluster-1.6.3/torch_cluster/typing.py new file mode 100644 index 0000000..f57544a --- /dev/null +++ b/pytorch_cluster-1.6.3/torch_cluster/typing.py @@ -0,0 +1,6 @@ +import torch + +try: + WITH_PTR_LIST = hasattr(torch.ops.torch_cluster, 'fps_ptr_list') +except Exception: + WITH_PTR_LIST = False diff --git a/pytorch_geometric-2.3.1/.github/actions/setup/action.yml b/pytorch_geometric-2.3.1/.github/actions/setup/action.yml deleted file mode 100644 index 412dc62..0000000 --- a/pytorch_geometric-2.3.1/.github/actions/setup/action.yml +++ /dev/null @@ -1,51 +0,0 @@ -name: Setup - -inputs: - python-version: - required: false - default: '3.8' - torch-version: - required: false - default: '2.0.0' - cuda-version: - required: false - default: cpu - torchvision-version: - required: false - default: '0.15.0' - full_install: - required: false - default: true - -runs: - using: composite - - steps: - - name: Set up Python ${{ inputs.python-version }} - uses: actions/setup-python@v4.3.0 - with: - python-version: ${{ inputs.python-version }} - check-latest: true - cache: pip - cache-dependency-path: | - setup.py - - - name: Install PyTorch ${{ inputs.torch-version }}+${{ inputs.cuda-version }} - run: | - pip install torch==${{ inputs.torch-version }} --extra-index-url https://download.pytorch.org/whl/${{ inputs.cuda-version }} - python -c "import torch; print('PyTorch:', torch.__version__)" - python -c "import torch; print('CUDA:', torch.version.cuda)" - shell: bash - - - name: Install extension packages - if: ${{ inputs.full_install == 'true' }} - run: | - pip install torchvision==${{ 
inputs.torchvision-version }} --extra-index-url https://download.pytorch.org/whl/${{ inputs.cuda-version }} - pip install torch-scatter torch-sparse torch-cluster torch-spline-conv -f https://data.pyg.org/whl/torch-${{ inputs.torch-version }}+${{ inputs.cuda-version }}.html - shell: bash - - - name: Install pyg-lib # pyg-lib is currently only available on Linux. - if: ${{ inputs.full_install == 'true' && runner.os == 'Linux' }} - run: | - pip install pyg-lib -f https://data.pyg.org/whl/nightly/torch-${{ inputs.torch-version }}+${{ inputs.cuda-version }}.html - shell: bash diff --git a/pytorch_geometric-2.3.1/.github/workflows/building_pyg_conda.yml b/pytorch_geometric-2.3.1/.github/workflows/building_pyg_conda.yml deleted file mode 100644 index 4464ddf..0000000 --- a/pytorch_geometric-2.3.1/.github/workflows/building_pyg_conda.yml +++ /dev/null @@ -1,87 +0,0 @@ -name: Building PyG Conda - -on: [workflow_dispatch] # yamllint disable-line rule:truthy - -jobs: - - conda-build: - runs-on: ${{ matrix.os }} - - strategy: - fail-fast: false - matrix: - os: [ubuntu-18.04, macos-10.15, windows-2019] - # We have troube building for Python 3.11 due to version conflicts. - python-version: ['3.7', '3.8', '3.9', '3.10'] # '3.11' - torch-version: [1.12.0, 1.13.0, 2.0.0] - cuda-version: ['cpu', 'cu102', 'cu113', 'cu116', 'cu117', 'cu118'] - exclude: - - torch-version: 1.12.0 - python-version: '3.11' - - torch-version: 1.12.0 - cuda-version: 'cu117' - - torch-version: 1.12.0 - cuda-version: 'cu118' - - torch-version: 1.13.0 - python-version: '3.11' - - torch-version: 1.13.0 - cuda-version: 'cu102' - - torch-version: 1.13.0 - cuda-version: 'cu113' - - torch-version: 1.13.0 - cuda-version: 'cu118' - - torch-version: 2.0.0 - python-version: '3.7' - - torch-version: 2.0.0 - cuda-version: 'cu102' - - torch-version: 2.0.0 - cuda-version: 'cu113' - - torch-version: 2.0.0 - cuda-version: 'cu115' - - torch-version: 2.0.0 - cuda-version: 'cu116' - - os: macos-10.15 - cuda-version: 'cu102' - - os: macos-10.15 - cuda-version: 'cu113' - - os: macos-10.15 - cuda-version: 'cu116' - - os: macos-10.15 - cuda-version: 'cu117' - - os: macos-10.15 - cuda-version: 'cu118' - - steps: - - name: Checkout repository - uses: actions/checkout@v3 - - - name: Set up Conda for Python ${{ matrix.python-version }} - uses: conda-incubator/setup-miniconda@v2 - with: - python-version: ${{ matrix.python-version }} - - - name: Free up disk space - if: ${{ runner.os == 'Linux' }} - run: | - sudo rm -rf /usr/share/dotnet - - - name: Install Conda packages - run: | - conda install conda-build conda-verify --yes - shell: - bash -l {0} - - - name: Build Conda package - run: | - ./conda/pyg/build_conda.sh ${{ matrix.python-version }} ${{ matrix.torch-version }} ${{ matrix.cuda-version }} - shell: - bash -l {0} - - - name: Publish Conda package - run: | - conda install anaconda-client --yes - anaconda upload --force --label main $HOME/conda-bld/*/*.tar.bz2 - env: - ANACONDA_API_TOKEN: ${{ secrets.PYG_ANACONDA_TOKEN }} - shell: - bash -l {0} diff --git a/pytorch_geometric-2.3.1/.github/workflows/building_rusty1s_conda.yml b/pytorch_geometric-2.3.1/.github/workflows/building_rusty1s_conda.yml deleted file mode 100644 index 296a402..0000000 --- a/pytorch_geometric-2.3.1/.github/workflows/building_rusty1s_conda.yml +++ /dev/null @@ -1,87 +0,0 @@ -name: Building rusty1s Conda - -on: [workflow_dispatch] # yamllint disable-line rule:truthy - -jobs: - - conda-build: - runs-on: ${{ matrix.os }} - - strategy: - fail-fast: false - matrix: - os: [ubuntu-18.04, 
macos-10.15, windows-2019] - # We have troube building for Python 3.11 due to version conflicts. - python-version: ['3.7', '3.8', '3.9', '3.10'] # '3.11' - torch-version: [1.12.0, 1.13.0, 2.0.0] - cuda-version: ['cpu', 'cu102', 'cu113', 'cu116', 'cu117', 'cu118'] - exclude: - - torch-version: 1.12.0 - python-version: '3.11' - - torch-version: 1.12.0 - cuda-version: 'cu117' - - torch-version: 1.12.0 - cuda-version: 'cu118' - - torch-version: 1.13.0 - python-version: '3.11' - - torch-version: 1.13.0 - cuda-version: 'cu102' - - torch-version: 1.13.0 - cuda-version: 'cu113' - - torch-version: 1.13.0 - cuda-version: 'cu118' - - torch-version: 2.0.0 - python-version: '3.7' - - torch-version: 2.0.0 - cuda-version: 'cu102' - - torch-version: 2.0.0 - cuda-version: 'cu113' - - torch-version: 2.0.0 - cuda-version: 'cu115' - - torch-version: 2.0.0 - cuda-version: 'cu116' - - os: macos-10.15 - cuda-version: 'cu102' - - os: macos-10.15 - cuda-version: 'cu113' - - os: macos-10.15 - cuda-version: 'cu116' - - os: macos-10.15 - cuda-version: 'cu117' - - os: macos-10.15 - cuda-version: 'cu118' - - steps: - - name: Checkout repository - uses: actions/checkout@v3 - - - name: Set up Conda for Python ${{ matrix.python-version }} - uses: conda-incubator/setup-miniconda@v2 - with: - python-version: ${{ matrix.python-version }} - - - name: Free up disk space - if: ${{ runner.os == 'Linux' }} - run: | - sudo rm -rf /usr/share/dotnet - - - name: Install Conda packages - run: | - conda install conda-build conda-verify --yes - shell: - bash -l {0} - - - name: Build Conda package - run: | - ./conda/pytorch-geometric/build_conda.sh ${{ matrix.python-version }} ${{ matrix.torch-version }} ${{ matrix.cuda-version }} - shell: - bash -l {0} - - - name: Publish Conda package - run: | - conda install anaconda-client --yes - anaconda upload --force --label main $HOME/conda-bld/*/*.tar.bz2 - env: - ANACONDA_API_TOKEN: ${{ secrets.RUSTY1S_ANACONDA_TOKEN }} - shell: - bash -l {0} diff --git a/pytorch_geometric-2.3.1/.github/workflows/full_testing.yml b/pytorch_geometric-2.3.1/.github/workflows/full_testing.yml deleted file mode 100644 index 7d05112..0000000 --- a/pytorch_geometric-2.3.1/.github/workflows/full_testing.yml +++ /dev/null @@ -1,46 +0,0 @@ -name: Full Testing - -on: # yamllint disable-line rule:truthy - workflow_dispatch: - schedule: - - cron: "0 6 * * *" # Everyday at 6:00am UTC/10:00pm PST - -jobs: - - pytest: - if: github.repository == 'pyg-team/pytorch_geometric' - runs-on: ${{ matrix.os }} - - strategy: - fail-fast: false - matrix: - os: [ubuntu-latest, windows-latest] - python-version: ['3.8', '3.9', '3.10'] - - steps: - - name: Checkout repository - uses: actions/checkout@v3 - - - name: Setup packages - uses: ./.github/actions/setup - with: - python-version: ${{ matrix.python-version }} - - - name: Install graphviz - if: ${{ runner.os == 'Linux' }} - run: | - sudo apt-get install graphviz - - - name: Install main package - run: | - pip install -e .[full,test] - - - name: Run tests - run: | - FULL_TEST=1 pytest --cov --cov-report=xml - shell: bash - - - name: Upload coverage - uses: codecov/codecov-action@v2 - with: - fail_ci_if_error: false diff --git a/pytorch_geometric-2.3.1/.github/workflows/install.yml b/pytorch_geometric-2.3.1/.github/workflows/install.yml deleted file mode 100644 index 3d7a01e..0000000 --- a/pytorch_geometric-2.3.1/.github/workflows/install.yml +++ /dev/null @@ -1,50 +0,0 @@ -name: Minimal Installation - -on: # yamllint disable-line rule:truthy - push: - branches: - - master - 
pull_request: - -jobs: - - import: - runs-on: ubuntu-latest - - steps: - - name: Checkout repository - uses: actions/checkout@v3 - with: - fetch-depth: 40 - - # Skip workflow if only certain files have been changed. - - name: Get changed files - id: changed-files-specific - uses: tj-actions/changed-files@v34 - with: - files: | - benchmark/** - conda/** - docker/** - docs/** - examples/** - graphgym/** - CHANGELOG.md - - - name: Setup packages - if: steps.changed-files-specific.outputs.only_changed != 'true' - uses: ./.github/actions/setup - with: - full_install: false - - - name: Install main package - if: steps.changed-files-specific.outputs.only_changed != 'true' - run: | - pip install -e . - - - name: Test imports - if: steps.changed-files-specific.outputs.only_changed != 'true' - run: | - python -c "import torch_geometric" - python -c "import torch_geometric.contrib" - python -c "import torch_geometric.graphgym" diff --git a/pytorch_geometric-2.3.1/.github/workflows/nightly.yml b/pytorch_geometric-2.3.1/.github/workflows/nightly.yml deleted file mode 100644 index 27c0b64..0000000 --- a/pytorch_geometric-2.3.1/.github/workflows/nightly.yml +++ /dev/null @@ -1,48 +0,0 @@ -name: Nightly Build - -on: # yamllint disable-line rule:truthy - workflow_dispatch: - schedule: - - cron: "0 6 * * *" # Everyday at 6:00am UTC/10:00pm PST - -jobs: - - sdist: - if: github.repository == 'pyg-team/pytorch_geometric' - runs-on: ubuntu-latest - - steps: - - name: Checkout repository - uses: actions/checkout@v3 - - - name: Set up Python - uses: actions/setup-python@v4.3.0 - with: - python-version: 3.8 - check-latest: true - cache: pip - cache-dependency-path: | - setup.py - - - name: Set version - run: echo "VERSION=$(sed -n "s/^__version__ = '\(.*\)'/\1/p" torch_geometric/__init__.py)" >> ${GITHUB_ENV} - - - name: Set time - run: echo "TODAY=$(date +'%Y%m%d')" >> ${GITHUB_ENV} - - - name: Customize build version - run: | - sed -i "s/name='torch_geometric'/name='pyg-nightly'/" setup.py - sed -i "s/$VERSION/$VERSION.dev$TODAY/" setup.py - sed -i "s/$VERSION/$VERSION.dev$TODAY/" torch_geometric/__init__.py - sed -i 's/name="torch_geometric"/name="pyg-nightly"/' pyproject.toml - sed -i "s/version=\"$VERSION\"/version=\"$VERSION.dev$TODAY\"/" pyproject.toml - - - name: Build package - run: python setup.py sdist - - - name: Publish package - uses: pypa/gh-action-pypi-publish@release/v1 - with: - user: __token__ - password: ${{ secrets.PYPI_API_TOKEN }} diff --git a/pytorch_geometric-2.3.1/.github/workflows/prev_testing.yml b/pytorch_geometric-2.3.1/.github/workflows/prev_testing.yml deleted file mode 100644 index 629e695..0000000 --- a/pytorch_geometric-2.3.1/.github/workflows/prev_testing.yml +++ /dev/null @@ -1,49 +0,0 @@ -name: Testing PyTorch 1.13 - -on: # yamllint disable-line rule:truthy - push: - branches: - - master - pull_request: - -jobs: - - prev_pytest: - runs-on: ubuntu-latest - - steps: - - name: Checkout repository - uses: actions/checkout@v3 - with: - fetch-depth: 40 - - # Skip workflow if only certain files have been changed. 
- - name: Get changed files - id: changed-files-specific - uses: tj-actions/changed-files@v34 - with: - files: | - benchmark/** - conda/** - docker/** - docs/** - examples/** - graphgym/** - CHANGELOG.md - - - name: Setup packages - if: steps.changed-files-specific.outputs.only_changed != 'true' - uses: ./.github/actions/setup - with: - torch-version: 1.13.0 - torchvision-version: 0.14.0 - - - name: Install main package - if: steps.changed-files-specific.outputs.only_changed != 'true' - run: | - pip install -e .[full,test] - - - name: Run tests - if: steps.changed-files-specific.outputs.only_changed != 'true' - run: | - pytest diff --git a/pytorch_geometric-2.3.1/.github/workflows/testing.yml b/pytorch_geometric-2.3.1/.github/workflows/testing.yml deleted file mode 100644 index cb444ef..0000000 --- a/pytorch_geometric-2.3.1/.github/workflows/testing.yml +++ /dev/null @@ -1,52 +0,0 @@ -name: Testing PyTorch 2.0 - -on: # yamllint disable-line rule:truthy - push: - branches: - - master - pull_request: - -jobs: - - pytest: - runs-on: ubuntu-latest - - steps: - - name: Checkout repository - uses: actions/checkout@v3 - with: - fetch-depth: 40 - - # Skip workflow if only certain files have been changed. - - name: Get changed files - id: changed-files-specific - uses: tj-actions/changed-files@v34 - with: - files: | - benchmark/** - conda/** - docker/** - docs/** - examples/** - graphgym/** - CHANGELOG.md - - - name: Setup packages - if: steps.changed-files-specific.outputs.only_changed != 'true' - uses: ./.github/actions/setup - - - name: Install main package - if: steps.changed-files-specific.outputs.only_changed != 'true' - run: | - pip install -e .[full,test] - - - name: Run tests - if: steps.changed-files-specific.outputs.only_changed != 'true' - run: | - pytest --cov --cov-report=xml - - - name: Upload coverage - if: steps.changed-files-specific.outputs.only_changed != 'true' - uses: codecov/codecov-action@v2 - with: - fail_ci_if_error: false diff --git a/pytorch_geometric-2.3.1/CHANGELOG.md b/pytorch_geometric-2.3.1/CHANGELOG.md deleted file mode 100644 index 073ba04..0000000 --- a/pytorch_geometric-2.3.1/CHANGELOG.md +++ /dev/null @@ -1,442 +0,0 @@ -# Changelog - -All notable changes to this project will be documented in this file. -The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 
- -## [2.4.0] - 2023-MM-DD - -### Added - -### Changed - -### Removed - -## [2.3.0] - 2023-03-23 - -### Added - -- Added a memory-efficient `utils.one_hot` implementation ([#7005](https://github.com/pyg-team/pytorch_geometric/pull/7005)) -- Added `HeteroDictLinear` and an optimized `FastHGTConv` module ([#6178](https://github.com/pyg-team/pytorch_geometric/pull/6178), [#6998](https://github.com/pyg-team/pytorch_geometric/pull/6998)) -- Added the `DenseGATConv` module ([#6928](https://github.com/pyg-team/pytorch_geometric/pull/6928)) -- Added `trim_to_layer` utility function for more efficient `NeighborLoader` use-cases ([#6661](https://github.com/pyg-team/pytorch_geometric/pull/6661)) -- Added the `DistMult` KGE model ([#6958](https://github.com/pyg-team/pytorch_geometric/pull/6958)) -- Added `HeteroData.set_value_dict` functionality ([#6961](https://github.com/pyg-team/pytorch_geometric/pull/6961), [#6974](https://github.com/pyg-team/pytorch_geometric/pull/6974)) -- Added PyTorch >= 2.0 support ([#6934](https://github.com/pyg-team/pytorch_geometric/pull/6934), [#7000](https://github.com/pyg-team/pytorch_geometric/pull/7000)) -- Added PyTorch Lightning >= 2.0 support ([#6929](https://github.com/pyg-team/pytorch_geometric/pull/6929)) -- Added the `ComplEx` KGE model ([#6898](https://github.com/pyg-team/pytorch_geometric/pull/6898)) -- Added option to write benchmark results to csv ([#6888](https://github.com/pyg-team/pytorch_geometric/pull/6888)) -- Added `HeteroLayerNorm` and `HeteroBatchNorm` layers ([#6838](https://github.com/pyg-team/pytorch_geometric/pull/6838)) -- Added the `HeterophilousGraphDataset` suite ([#6846](https://github.com/pyg-team/pytorch_geometric/pull/6846)) -- Added support for sparse tensor in full batch mode inference benchmark ([#6843](https://github.com/pyg-team/pytorch_geometric/pull/6843)) -- Enabled `NeighborLoader` to return number of sampled nodes and edges per hop ([#6834](https://github.com/pyg-team/pytorch_geometric/pull/6834)) -- Added `ZipLoader` to execute multiple `NodeLoader` or `LinkLoader` instances ([#6829](https://github.com/pyg-team/pytorch_geometric/issues/6829)) -- Added common `utils.select` and `utils.narrow` functionality to support filtering of both tensors and lists ([#6162](https://github.com/pyg-team/pytorch_geometric/issues/6162)) -- Support `normalization` customization in `get_mesh_laplacian` ([#6790](https://github.com/pyg-team/pytorch_geometric/issues/6790)) -- Added the `TemporalEncoding` module ([#6785](https://github.com/pyg-team/pytorch_geometric/pull/6785)) -- Added CPU-optimized `spmm_reduce` functionality via CSR format ([#6699](https://github.com/pyg-team/pytorch_geometric/pull/6699), [#6759](https://github.com/pyg-team/pytorch_geometric/pull/6759)) -- Added support for the revised version of the `MD17` dataset ([#6734](https://github.com/pyg-team/pytorch_geometric/pull/6734)) -- Added TorchScript support to the `RECT_L` model ([#6727](https://github.com/pyg-team/pytorch_geometric/pull/6727)) -- Added TorchScript support to the `Node2Vec` model ([#6726](https://github.com/pyg-team/pytorch_geometric/pull/6726)) -- Added `utils.to_edge_index` to convert sparse tensors to edge indices and edge attributes ([#6728](https://github.com/pyg-team/pytorch_geometric/issues/6728)) -- Fixed expected data format in `PolBlogs` dataset ([#6714](https://github.com/pyg-team/pytorch_geometric/issues/6714)) -- Added `SimpleConv` to perform non-trainable propagation ([#6718](https://github.com/pyg-team/pytorch_geometric/pull/6718)) -- Added a 
`RemoveDuplicatedEdges` transform ([#6709](https://github.com/pyg-team/pytorch_geometric/pull/6709)) -- Added TorchScript support to the `LINKX` model ([#6712](https://github.com/pyg-team/pytorch_geometric/pull/6712)) -- Added `torch.jit` examples for `example/film.py` and `example/gcn.py`([#6602](https://github.com/pyg-team/pytorch_geometric/pull/6692)) -- Added `Pad` transform ([#5940](https://github.com/pyg-team/pytorch_geometric/pull/5940), [#6697](https://github.com/pyg-team/pytorch_geometric/pull/6697), [#6731](https://github.com/pyg-team/pytorch_geometric/pull/6731), [#6758](https://github.com/pyg-team/pytorch_geometric/pull/6758)) -- Added full batch mode to the inference benchmark ([#6631](https://github.com/pyg-team/pytorch_geometric/pull/6631)) -- Added `cat` aggregation type to the `HeteroConv` class so that features can be concatenated during grouping ([#6634](https://github.com/pyg-team/pytorch_geometric/pull/6634)) -- Added `torch.compile` support and benchmark study ([#6610](https://github.com/pyg-team/pytorch_geometric/pull/6610), [#6952](https://github.com/pyg-team/pytorch_geometric/pull/6952), [#6953](https://github.com/pyg-team/pytorch_geometric/pull/6953), [#6980](https://github.com/pyg-team/pytorch_geometric/pull/6980), [#6983](https://github.com/pyg-team/pytorch_geometric/pull/6983), [#6984](https://github.com/pyg-team/pytorch_geometric/pull/6984), [#6985](https://github.com/pyg-team/pytorch_geometric/pull/6985), [#6986](https://github.com/pyg-team/pytorch_geometric/pull/6986), [#6989](https://github.com/pyg-team/pytorch_geometric/pull/6989), [#7002](https://github.com/pyg-team/pytorch_geometric/pull/7002)) -- Added the `AntiSymmetricConv` layer ([#6577](https://github.com/pyg-team/pytorch_geometric/pull/6577)) -- Added a mixin for Huggingface model hub integration ([#5930](https://github.com/pyg-team/pytorch_geometric/pull/5930), [#6591](https://github.com/pyg-team/pytorch_geometric/pull/6591)) -- Added support for accelerated GNN layers in `nn.conv.cugraph` via `cugraph-ops` ([#6278](https://github.com/pyg-team/pytorch_geometric/pull/6278), [#6388](https://github.com/pyg-team/pytorch_geometric/pull/6388), [#6412](https://github.com/pyg-team/pytorch_geometric/pull/6412)) -- Added accelerated `index_sort` function from `pyg-lib` for faster sorting ([#6554](https://github.com/pyg-team/pytorch_geometric/pull/6554)) -- Fix incorrect device in `EquilibriumAggregration` ([#6560](https://github.com/pyg-team/pytorch_geometric/pull/6560)) -- Added bipartite graph support in `dense_to_sparse()` ([#6546](https://github.com/pyg-team/pytorch_geometric/pull/6546)) -- Add CPU affinity support for more data loaders ([#6534](https://github.com/pyg-team/pytorch_geometric/pull/6534), [#6922](https://github.com/pyg-team/pytorch_geometric/pull/6922)) -- Added the `BAMultiShapesDataset` ([#6541](https://github.com/pyg-team/pytorch_geometric/pull/6541)) -- Added the interfaces of a graph pooling framework ([#6540](https://github.com/pyg-team/pytorch_geometric/pull/6540)) -- Added automatic `n_id` and `e_id` attributes to mini-batches produced by `NodeLoader` and `LinkLoader` ([#6524](https://github.com/pyg-team/pytorch_geometric/pull/6524)) -- Added `PGMExplainer` to `torch_geometric.contrib` ([#6149](https://github.com/pyg-team/pytorch_geometric/pull/6149), [#6588](https://github.com/pyg-team/pytorch_geometric/pull/6588), [#6589](https://github.com/pyg-team/pytorch_geometric/pull/6589)) -- Added a `NumNeighbors` helper class for specifying the number of neighbors when sampling 
([#6501](https://github.com/pyg-team/pytorch_geometric/pull/6501), [#6505](https://github.com/pyg-team/pytorch_geometric/pull/6505), [#6690](https://github.com/pyg-team/pytorch_geometric/pull/6690)) -- Added caching to `is_node_attr()` and `is_edge_attr()` calls ([#6492](https://github.com/pyg-team/pytorch_geometric/pull/6492)) -- Added `ToHeteroLinear` and `ToHeteroMessagePassing` modules to accelerate `to_hetero` functionality ([#5992](https://github.com/pyg-team/pytorch_geometric/pull/5992), [#6456](https://github.com/pyg-team/pytorch_geometric/pull/6456)) -- Added `GraphMaskExplainer` ([#6284](https://github.com/pyg-team/pytorch_geometric/pull/6284)) -- Added the `GRBCD` and `PRBCD` adversarial attack models ([#5972](https://github.com/pyg-team/pytorch_geometric/pull/5972)) -- Added `dropout` option to `SetTransformer` and `GraphMultisetTransformer` ([#6484](https://github.com/pyg-team/pytorch_geometric/pull/6484)) -- Added option to customize loader arguments for evaluation in `LightningNodeData` and `LightningLinkData` ([#6450](https://github.com/pyg-team/pytorch_geometric/pull/6450), [#6456](https://github.com/pyg-team/pytorch_geometric/pull/6456)) -- Added option to customize `num_neighbors` in `NeighborSampler` after instantiation ([#6446](https://github.com/pyg-team/pytorch_geometric/pull/6446)) -- Added the `Taobao` dataset and a corresponding example for it ([#6144](https://github.com/pyg-team/pytorch_geometric/pull/6144)) -- Added `pyproject.toml` ([#6431](https://github.com/pyg-team/pytorch_geometric/pull/6431)) -- Added the `torch_geometric.contrib` sub-package ([#6422](https://github.com/pyg-team/pytorch_geometric/pull/6422)) -- Warn on using latest documentation ([#6418](https://github.com/pyg-team/pytorch_geometric/pull/6418)) -- Added basic `pyright` type checker support ([#6415](https://github.com/pyg-team/pytorch_geometric/pull/6415)) -- Added a new external resource for link prediction ([#6396](https://github.com/pyg-team/pytorch_geometric/pull/6396)) -- Added `CaptumExplainer` ([#6383](https://github.com/pyg-team/pytorch_geometric/pull/6383), [#6387](https://github.com/pyg-team/pytorch_geometric/pull/6387), [#6433](https://github.com/pyg-team/pytorch_geometric/pull/6433), [#6487](https://github.com/pyg-team/pytorch_geometric/pull/6487), [#6966](https://github.com/pyg-team/pytorch_geometric/pull/6966)) -- Added support for custom `HeteroData` mini-batch class in remote backends ([#6377](https://github.com/pyg-team/pytorch_geometric/pull/6377)) -- Added the `GNNFF` model ([#5866](https://github.com/pyg-team/pytorch_geometric/pull/5866)) -- Added `MLPAggregation`, `SetTransformerAggregation`, `GRUAggregation`, and `DeepSetsAggregation` as adaptive readout functions ([#6301](https://github.com/pyg-team/pytorch_geometric/pull/6301), [#6336](https://github.com/pyg-team/pytorch_geometric/pull/6336), [#6338](https://github.com/pyg-team/pytorch_geometric/pull/6338)) -- Added `Dataset.to_datapipe` for converting PyG datasets into a torchdata `DataPipe`([#6141](https://github.com/pyg-team/pytorch_geometric/pull/6141)) -- Added `to_nested_tensor` and `from_nested_tensor` functionality ([#6329](https://github.com/pyg-team/pytorch_geometric/pull/6329), [#6330](https://github.com/pyg-team/pytorch_geometric/pull/6330), [#6331](https://github.com/pyg-team/pytorch_geometric/pull/6331), [#6332](https://github.com/pyg-team/pytorch_geometric/pull/6332)) -- Added the `GPSConv` Graph Transformer layer and example ([#6326](https://github.com/pyg-team/pytorch_geometric/pull/6326), 
[#6327](https://github.com/pyg-team/pytorch_geometric/pull/6327)) -- Added `networkit` conversion utilities ([#6321](https://github.com/pyg-team/pytorch_geometric/pull/6321)) -- Added global dataset attribute access via `dataset.{attr_name}` ([#6319](https://github.com/pyg-team/pytorch_geometric/pull/6319)) -- Added the `TransE` KGE model and example ([#6314](https://github.com/pyg-team/pytorch_geometric/pull/6314)) -- Added the Freebase `FB15k_237` dataset ([#3204](https://github.com/pyg-team/pytorch_geometric/pull/3204)) -- Added `Data.update()` and `HeteroData.update()` functionality ([#6313](https://github.com/pyg-team/pytorch_geometric/pull/6313)) -- Added `PGExplainer` ([#6204](https://github.com/pyg-team/pytorch_geometric/pull/6204)) -- Added the `AirfRANS` dataset ([#6287](https://github.com/pyg-team/pytorch_geometric/pull/6287)) -- Added `AttentionExplainer` ([#6279](https://github.com/pyg-team/pytorch_geometric/pull/6279)) -- Added (un)faithfulness explainability metric ([#6090](https://github.com/pyg-team/pytorch_geometric/pull/6090)) -- Added fidelity explainability metric ([#6116](https://github.com/pyg-team/pytorch_geometric/pull/6116), [#6510](https://github.com/pyg-team/pytorch_geometric/pull/6510)) -- Added subgraph visualization of GNN explanations ([#6235](https://github.com/pyg-team/pytorch_geometric/pull/6235), [#6271](https://github.com/pyg-team/pytorch_geometric/pull/6271)) -- Added weighted negative sampling option in `LinkNeighborLoader` ([#6264](https://github.com/pyg-team/pytorch_geometric/pull/6264)) -- Added the `BA2MotifDataset` explainer dataset ([#6257](https://github.com/pyg-team/pytorch_geometric/pull/6257)) -- Added `CycleMotif` motif generator to generate `n`-node cycle shaped motifs ([#6256](https://github.com/pyg-team/pytorch_geometric/pull/6256)) -- Added the `InfectionDataset` to evaluate explanations ([#6222](https://github.com/pyg-team/pytorch_geometric/pull/6222)) -- Added `characterization_score` and `fidelity_curve_auc` explainer metrics ([#6188](https://github.com/pyg-team/pytorch_geometric/pull/6188)) -- Added `get_message_passing_embeddings` ([#6201](https://github.com/pyg-team/pytorch_geometric/pull/6201)) -- Added the `PointGNNConv` layer ([#6194](https://github.com/pyg-team/pytorch_geometric/pull/6194)) -- Added `GridGraph` graph generator to generate grid graphs ([#6220](https://github.com/pyg-team/pytorch_geometric/pull/6220) -- Added explainability metrics for when ground truth is available ([#6137](https://github.com/pyg-team/pytorch_geometric/pull/6137)) -- Added `visualize_feature_importance` to support node feature visualizations ([#6094](https://github.com/pyg-team/pytorch_geometric/pull/6094)) -- Added heterogeneous graph support to `Explanation` framework ([#6091](https://github.com/pyg-team/pytorch_geometric/pull/6091), [#6218](https://github.com/pyg-team/pytorch_geometric/pull/6218)) -- Added a `CustomMotif` motif generator ([#6179](https://github.com/pyg-team/pytorch_geometric/pull/6179)) -- Added `ERGraph` graph generator to generate Ergos-Renyi (ER) graphs ([#6073](https://github.com/pyg-team/pytorch_geometric/pull/6073)) -- Added `BAGraph` graph generator to generate Barabasi-Albert graphs - the usage of `datasets.BAShapes` is now deprecated ([#6072](https://github.com/pyg-team/pytorch_geometric/pull/6072) -- Added explainability benchmark dataset framework ([#6104](https://github.com/pyg-team/pytorch_geometric/pull/6104)) -- Added `seed_time` attribute to temporal `NodeLoader` outputs in case `input_time` is given 
([#6196](https://github.com/pyg-team/pytorch_geometric/pull/6196)) -- Added `Data.edge_subgraph` and `HeteroData.edge_subgraph` functionalities ([#6193](https://github.com/pyg-team/pytorch_geometric/pull/6193)) -- Added `input_time` option to `LightningNodeData` and `transform_sampler_output` to `NodeLoader` and `LinkLoader` ([#6187](https://github.com/pyg-team/pytorch_geometric/pull/6187)) -- Added `summary` for PyG/PyTorch models ([#5859](https://github.com/pyg-team/pytorch_geometric/pull/5859), [#6161](https://github.com/pyg-team/pytorch_geometric/pull/6161)) -- Started adding `torch.sparse` support to PyG ([#5906](https://github.com/pyg-team/pytorch_geometric/pull/5906), [#5944](https://github.com/pyg-team/pytorch_geometric/pull/5944), [#6003](https://github.com/pyg-team/pytorch_geometric/pull/6003), [#6033](https://github.com/pyg-team/pytorch_geometric/pull/6033), [#6514](https://github.com/pyg-team/pytorch_geometric/pull/6514), [#6532](https://github.com/pyg-team/pytorch_geometric/pull/6532), [#6748](https://github.com/pyg-team/pytorch_geometric/pull/6748), [#6847](https://github.com/pyg-team/pytorch_geometric/pull/6847), [#6868](https://github.com/pyg-team/pytorch_geometric/pull/6868), [#6874](https://github.com/pyg-team/pytorch_geometric/pull/6874), [#6897](https://github.com/pyg-team/pytorch_geometric/pull/6897), [#6930](https://github.com/pyg-team/pytorch_geometric/pull/6930), [#6932](https://github.com/pyg-team/pytorch_geometric/pull/6932), [#6936](https://github.com/pyg-team/pytorch_geometric/pull/6936), [#6937](https://github.com/pyg-team/pytorch_geometric/pull/6937), [#6939](https://github.com/pyg-team/pytorch_geometric/pull/6939), [#6947](https://github.com/pyg-team/pytorch_geometric/pull/6947), [#6950](https://github.com/pyg-team/pytorch_geometric/pull/6950), [#6951](https://github.com/pyg-team/pytorch_geometric/pull/6951), [#6957](https://github.com/pyg-team/pytorch_geometric/pull/6957)) -- Add `inputs_channels` back in training benchmark ([#6154](https://github.com/pyg-team/pytorch_geometric/pull/6154)) -- Added support for dropping nodes in `utils.to_dense_batch` in case `max_num_nodes` is smaller than the number of nodes ([#6124](https://github.com/pyg-team/pytorch_geometric/pull/6124)) -- Added the RandLA-Net architecture as an example ([#5117](https://github.com/pyg-team/pytorch_geometric/pull/5117)) - -### Changed - -- Drop internal usage of `__dunder__` names ([#6999](https://github.com/pyg-team/pytorch_geometric/issues/6999)) -- Changed the interface of `sort_edge_index`, `coalesce` and `to_undirected` to only return single `edge_index` information in case the `edge_attr` argument is not specified ([#6875](https://github.com/pyg-team/pytorch_geometric/issues/6875), [#6879](https://github.com/pyg-team/pytorch_geometric/issues/6879), [#6893](https://github.com/pyg-team/pytorch_geometric/issues/6893)) -- Fixed a bug in `to_hetero` when using an uninitialized submodule without implementing `reset_parameters` ([#6863](https://github.com/pyg-team/pytorch_geometric/issues/6790)) -- Fixed a bug in `get_mesh_laplacian` ([#6790](https://github.com/pyg-team/pytorch_geometric/issues/6790)) -- Fixed a bug in which masks were not properly masked in `GNNExplainer` on link prediction tasks ([#6787](https://github.com/pyg-team/pytorch_geometric/pull/6787)) -- Allow the usage of `ChebConv` within `GNNExplainer` ([#6778](https://github.com/pyg-team/pytorch_geometric/pull/6778)) -- Allow setting the `EdgeStorage.num_edges` property 
([#6710](https://github.com/pyg-team/pytorch_geometric/pull/6710)) -- Fixed a bug in `utils.bipartite_subgraph()` and updated docs of `HeteroData.subgraph()` ([#6654](https://github.com/pyg-team/pytorch_geometric/pull/6654)) -- Properly reset the `data_list` cache of an `InMemoryDataset` when accessing `dataset.data` ([#6685](https://github.com/pyg-team/pytorch_geometric/pull/6685)) -- Fixed a bug in `Data.subgraph()` and `HeteroData.subgraph()` ([#6613](https://github.com/pyg-team/pytorch_geometric/pull/6613)) -- Fixed a bug in `PNAConv` and `DegreeScalerAggregation` to correctly incorporate degree statistics of isolated nodes ([#6609](https://github.com/pyg-team/pytorch_geometric/pull/6609)) -- Improved code coverage ([#6523](https://github.com/pyg-team/pytorch_geometric/pull/6523), [#6538](https://github.com/pyg-team/pytorch_geometric/pull/6538), [#6555](https://github.com/pyg-team/pytorch_geometric/pull/6555), [#6558](https://github.com/pyg-team/pytorch_geometric/pull/6558), [#6568](https://github.com/pyg-team/pytorch_geometric/pull/6568), [#6573](https://github.com/pyg-team/pytorch_geometric/pull/6573), [#6578](https://github.com/pyg-team/pytorch_geometric/pull/6578), [#6597](https://github.com/pyg-team/pytorch_geometric/pull/6597), [#6600](https://github.com/pyg-team/pytorch_geometric/pull/6600), [#6618](https://github.com/pyg-team/pytorch_geometric/pull/6618), [#6619](https://github.com/pyg-team/pytorch_geometric/pull/6619), [#6621](https://github.com/pyg-team/pytorch_geometric/pull/6621), [#6623](https://github.com/pyg-team/pytorch_geometric/pull/6623), [#6637](https://github.com/pyg-team/pytorch_geometric/pull/6637), [#6638](https://github.com/pyg-team/pytorch_geometric/pull/6638), [#6640](https://github.com/pyg-team/pytorch_geometric/pull/6640), [#6645](https://github.com/pyg-team/pytorch_geometric/pull/6645), [#6648](https://github.com/pyg-team/pytorch_geometric/pull/6648), [#6647](https://github.com/pyg-team/pytorch_geometric/pull/6647), [#6653](https://github.com/pyg-team/pytorch_geometric/pull/6653), [#6657](https://github.com/pyg-team/pytorch_geometric/pull/6657), [#6662](https://github.com/pyg-team/pytorch_geometric/pull/6662), [#6664](https://github.com/pyg-team/pytorch_geometric/pull/6664), [#6667](https://github.com/pyg-team/pytorch_geometric/pull/6667), [#6668](https://github.com/pyg-team/pytorch_geometric/pull/6668), [#6669](https://github.com/pyg-team/pytorch_geometric/pull/6669), [#6670](https://github.com/pyg-team/pytorch_geometric/pull/6670), [#6671](https://github.com/pyg-team/pytorch_geometric/pull/6671), [#6673](https://github.com/pyg-team/pytorch_geometric/pull/6673), [#6675](https://github.com/pyg-team/pytorch_geometric/pull/6675), [#6676](https://github.com/pyg-team/pytorch_geometric/pull/6676), [#6677](https://github.com/pyg-team/pytorch_geometric/pull/6677), [#6678](https://github.com/pyg-team/pytorch_geometric/pull/6678), [#6681](https://github.com/pyg-team/pytorch_geometric/pull/6681), [#6683](https://github.com/pyg-team/pytorch_geometric/pull/6683), [#6703](https://github.com/pyg-team/pytorch_geometric/pull/6703), [#6720](https://github.com/pyg-team/pytorch_geometric/pull/6720), [#6735](https://github.com/pyg-team/pytorch_geometric/pull/6735), [#6736](https://github.com/pyg-team/pytorch_geometric/pull/6736), [#6763](https://github.com/pyg-team/pytorch_geometric/pull/6763), [#6781](https://github.com/pyg-team/pytorch_geometric/pull/6781), [#6797](https://github.com/pyg-team/pytorch_geometric/pull/6797), 
[#6799](https://github.com/pyg-team/pytorch_geometric/pull/6799), [#6824](https://github.com/pyg-team/pytorch_geometric/pull/6824), [#6858](https://github.com/pyg-team/pytorch_geometric/pull/6858)) -- Fixed a bug in which `data.to_heterogeneous()` filtered attributs in the wrong dimension ([#6522](https://github.com/pyg-team/pytorch_geometric/pull/6522)) -- Breaking Change: Temporal sampling will now also sample nodes with an equal timestamp to the seed time (requires `pyg-lib>0.1.0`) ([#6517](https://github.com/pyg-team/pytorch_geometric/pull/6517)) -- Changed `DataLoader` workers with affinity to start at `cpu0` ([#6512](https://github.com/pyg-team/pytorch_geometric/pull/6512)) -- Allow 1D input to `global_*_pool` functions ([#6504](https://github.com/pyg-team/pytorch_geometric/pull/6504)) -- Add information about dynamic shapes in `RGCNConv` ([#6482](https://github.com/pyg-team/pytorch_geometric/pull/6482)) -- Fixed the use of types removed in `numpy 1.24.0` ([#6495](https://github.com/pyg-team/pytorch_geometric/pull/6495)) -- Fixed keyword parameters in `examples/mnist_voxel_grid.py` ([#6478](https://github.com/pyg-team/pytorch_geometric/pull/6478)) -- Unified `LightningNodeData` and `LightningLinkData` code paths ([#6473](https://github.com/pyg-team/pytorch_geometric/pull/6473)) -- Allow indices with any integer type in `RGCNConv` ([#6463](https://github.com/pyg-team/pytorch_geometric/pull/6463)) -- Re-structured the documentation ([#6420](https://github.com/pyg-team/pytorch_geometric/pull/6420), [#6423](https://github.com/pyg-team/pytorch_geometric/pull/6423), [#6429](https://github.com/pyg-team/pytorch_geometric/pull/6429), [#6440](https://github.com/pyg-team/pytorch_geometric/pull/6440), [#6443](https://github.com/pyg-team/pytorch_geometric/pull/6443), [#6445](https://github.com/pyg-team/pytorch_geometric/pull/6445), [#6452](https://github.com/pyg-team/pytorch_geometric/pull/6452), [#6453](https://github.com/pyg-team/pytorch_geometric/pull/6453), [#6458](https://github.com/pyg-team/pytorch_geometric/pull/6458), [#6459](https://github.com/pyg-team/pytorch_geometric/pull/6459), [#6460](https://github.com/pyg-team/pytorch_geometric/pull/6460), [#6490](https://github.com/pyg-team/pytorch_geometric/pull/6490), [#6491](https://github.com/pyg-team/pytorch_geometric/pull/6491), [#6693](https://github.com/pyg-team/pytorch_geometric/pull/6693), [#6744](https://github.com/pyg-team/pytorch_geometric/pull/6744)) -- Fix the default arguments of `DataParallel` class ([#6376](https://github.com/pyg-team/pytorch_geometric/pull/6376)) -- Fix `ImbalancedSampler` on sliced `InMemoryDataset` ([#6374](https://github.com/pyg-team/pytorch_geometric/pull/6374)) -- Breaking Change: Changed the interface and implementation of `GraphMultisetTransformer` ([#6343](https://github.com/pyg-team/pytorch_geometric/pull/6343)) -- Fixed the approximate PPR variant in `transforms.GDC` to not crash on graphs with isolated nodes ([#6242](https://github.com/pyg-team/pytorch_geometric/pull/6242)) -- Added a warning when accesing `InMemoryDataset.data` ([#6318](https://github.com/pyg-team/pytorch_geometric/pull/6318)) -- Drop `SparseTensor` dependency in `GraphStore` ([#5517](https://github.com/pyg-team/pytorch_geometric/pull/5517)) -- Replace `NeighborSampler` with `NeighborLoader` in the distributed sampling example ([#6204](https://github.com/pyg-team/pytorch_geometric/pull/6307)) -- Fixed the filtering of node features in `transforms.RemoveIsolatedNodes` ([#6308](https://github.com/pyg-team/pytorch_geometric/pull/6308)) 
-- Fixed a bug in `DimeNet` that causes a output dimension mismatch ([#6305](https://github.com/pyg-team/pytorch_geometric/pull/6305)) -- Fixed `Data.to_heterogeneous()` with empty `edge_index` ([#6304](https://github.com/pyg-team/pytorch_geometric/pull/6304)) -- Unify `Explanation.node_mask` and `Explanation.node_feat_mask` ([#6267](https://github.com/pyg-team/pytorch_geometric/pull/6267)) -- Moved thresholding config of the `Explainer` to `Explanation` ([#6215](https://github.com/pyg-team/pytorch_geometric/pull/6215)) -- Fixed a bug in the output order in `HeteroLinear` for un-sorted type vectors ([#6198](https://github.com/pyg-team/pytorch_geometric/pull/6198)) -- Breaking Change: Move `ExplainerConfig` arguments to the `Explainer` class ([#6176](https://github.com/pyg-team/pytorch_geometric/pull/6176)) -- Refactored `NeighborSampler` to be input-type agnostic ([#6173](https://github.com/pyg-team/pytorch_geometric/pull/6173)) -- Infer correct CUDA device ID in `profileit` decorator ([#6164](https://github.com/pyg-team/pytorch_geometric/pull/6164)) -- Correctly use edge weights in `GDC` example ([#6159](https://github.com/pyg-team/pytorch_geometric/pull/6159)) -- Breaking Change: Moved PyTorch Lightning data modules to `torch_geometric.data.lightning` ([#6140](https://github.com/pyg-team/pytorch_geometric/pull/6140)) -- Make `torch_sparse` an optional dependency ([#6132](https://github.com/pyg-team/pytorch_geometric/pull/6132), [#6134](https://github.com/pyg-team/pytorch_geometric/pull/6134), [#6138](https://github.com/pyg-team/pytorch_geometric/pull/6138), [#6139](https://github.com/pyg-team/pytorch_geometric/pull/6139)) -- Optimized `utils.softmax` implementation ([#6113](https://github.com/pyg-team/pytorch_geometric/pull/6113), [#6155](https://github.com/pyg-team/pytorch_geometric/pull/6155), [#6805](https://github.com/pyg-team/pytorch_geometric/pull/6805)) -- Optimized `topk` implementation for large enough graphs ([#6123](https://github.com/pyg-team/pytorch_geometric/pull/6123)) - -### Removed - -- `torch-sparse` is now an optional dependency ([#6625](https://github.com/pyg-team/pytorch_geometric/pull/6625), [#6626](https://github.com/pyg-team/pytorch_geometric/pull/6626), [#6627](https://github.com/pyg-team/pytorch_geometric/pull/6627), [#6628](https://github.com/pyg-team/pytorch_geometric/pull/6628), [#6629](https://github.com/pyg-team/pytorch_geometric/pull/6629), [#6630](https://github.com/pyg-team/pytorch_geometric/pull/6630)) -- Removed most of the `torch-scatter` dependencies ([#6394](https://github.com/pyg-team/pytorch_geometric/pull/6394), [#6395](https://github.com/pyg-team/pytorch_geometric/pull/6395), [#6399](https://github.com/pyg-team/pytorch_geometric/pull/6399), [#6400](https://github.com/pyg-team/pytorch_geometric/pull/6400), [#6615](https://github.com/pyg-team/pytorch_geometric/pull/6615), [#6617](https://github.com/pyg-team/pytorch_geometric/pull/6617)) -- Removed the deprecated classes `GNNExplainer` and `Explainer` from `nn.models` ([#6382](https://github.com/pyg-team/pytorch_geometric/pull/6382)) -- Removed `target_index` argument in the `Explainer` interface ([#6270](https://github.com/pyg-team/pytorch_geometric/pull/6270)) -- Removed `Aggregation.set_validate_args` option ([#6175](https://github.com/pyg-team/pytorch_geometric/pull/6175)) - -## [2.2.0] - 2022-12-01 - -### Added - -- Extended `GNNExplainer` to support edge level explanations ([#6056](https://github.com/pyg-team/pytorch_geometric/pull/6056), 
[#6083](https://github.com/pyg-team/pytorch_geometric/pull/6083)) -- Added CPU affinitization for `NodeLoader` ([#6005](https://github.com/pyg-team/pytorch_geometric/pull/6005)) -- Added triplet sampling in `LinkNeighborLoader` ([#6004](https://github.com/pyg-team/pytorch_geometric/pull/6004)) -- Added `FusedAggregation` of simple scatter reductions ([#6036](https://github.com/pyg-team/pytorch_geometric/pull/6036)) -- Added a `to_smiles` function ([#6038](https://github.com/pyg-team/pytorch_geometric/pull/6038)) -- Added option to make normalization coefficients trainable in `PNAConv` ([#6039](https://github.com/pyg-team/pytorch_geometric/pull/6039)) -- Added `semi_grad` option in `VarAggregation` and `StdAggregation` ([#6042](https://github.com/pyg-team/pytorch_geometric/pull/6042)) -- Allow for fused aggregations in `MultiAggregation` ([#6036](https://github.com/pyg-team/pytorch_geometric/pull/6036), [#6040](https://github.com/pyg-team/pytorch_geometric/pull/6040)) -- Added `HeteroData` support for `to_captum_model` and added `to_captum_input` ([#5934](https://github.com/pyg-team/pytorch_geometric/pull/5934)) -- Added `HeteroData` support in `RandomNodeLoader` ([#6007](https://github.com/pyg-team/pytorch_geometric/pull/6007)) -- Added bipartite `GraphSAGE` example ([#5834](https://github.com/pyg-team/pytorch_geometric/pull/5834)) -- Added `LRGBDataset` to include 5 datasets from the [Long Range Graph Benchmark](https://openreview.net/pdf?id=in7XC5RcjEn) ([#5935](https://github.com/pyg-team/pytorch_geometric/pull/5935)) -- Added a warning for invalid node and edge type names in `HeteroData` ([#5990](https://github.com/pyg-team/pytorch_geometric/pull/5990)) -- Added PyTorch 1.13 support ([#5975](https://github.com/pyg-team/pytorch_geometric/pull/5975)) -- Added `int32` support in `NeighborLoader` ([#5948](https://github.com/pyg-team/pytorch_geometric/pull/5948)) -- Add `dgNN` support and `FusedGATConv` implementation ([#5140](https://github.com/pyg-team/pytorch_geometric/pull/5140)) -- Added `lr_scheduler_solver` and customized `lr_scheduler` classes ([#5942](https://github.com/pyg-team/pytorch_geometric/pull/5942)) -- Add `to_fixed_size` graph transformer ([#5939](https://github.com/pyg-team/pytorch_geometric/pull/5939)) -- Add support for symbolic tracing of `SchNet` model ([#5938](https://github.com/pyg-team/pytorch_geometric/pull/5938)) -- Add support for customizable interaction graph in `SchNet` model ([#5919](https://github.com/pyg-team/pytorch_geometric/pull/5919)) -- Started adding `torch.sparse` support to PyG ([#5906](https://github.com/pyg-team/pytorch_geometric/pull/5906), [#5944](https://github.com/pyg-team/pytorch_geometric/pull/5944), [#6003](https://github.com/pyg-team/pytorch_geometric/pull/6003), [#6633](https://github.com/pyg-team/pytorch_geometric/pull/6633)) -- Added `HydroNet` water cluster dataset ([#5537](https://github.com/pyg-team/pytorch_geometric/pull/5537), [#5902](https://github.com/pyg-team/pytorch_geometric/pull/5902), [#5903](https://github.com/pyg-team/pytorch_geometric/pull/5903)) -- Added explainability support for heterogeneous GNNs ([#5886](https://github.com/pyg-team/pytorch_geometric/pull/5886)) -- Added `SparseTensor` support to `SuperGATConv` ([#5888](https://github.com/pyg-team/pytorch_geometric/pull/5888)) -- Added TorchScript support for `AttentiveFP `([#5868](https://github.com/pyg-team/pytorch_geometric/pull/5868)) -- Added `num_steps` argument to training and inference benchmarks 
([#5898](https://github.com/pyg-team/pytorch_geometric/pull/5898)) -- Added `torch.onnx.export` support ([#5877](https://github.com/pyg-team/pytorch_geometric/pull/5877), [#5997](https://github.com/pyg-team/pytorch_geometric/pull/5997)) -- Enable VTune ITT in inference and training benchmarks ([#5830](https://github.com/pyg-team/pytorch_geometric/pull/5830), [#5878](https://github.com/pyg-team/pytorch_geometric/pull/5878)) -- Add training benchmark ([#5774](https://github.com/pyg-team/pytorch_geometric/pull/5774)) -- Added a "Link Prediction on MovieLens" Colab notebook ([#5823](https://github.com/pyg-team/pytorch_geometric/pull/5823)) -- Added custom `sampler` support in `LightningDataModule` ([#5820](https://github.com/pyg-team/pytorch_geometric/pull/5820)) -- Added a `return_semantic_attention_weights` argument `HANConv` ([#5787](https://github.com/pyg-team/pytorch_geometric/pull/5787)) -- Added `disjoint` argument to `NeighborLoader` and `LinkNeighborLoader` ([#5775](https://github.com/pyg-team/pytorch_geometric/pull/5775)) -- Added support for `input_time` in `NeighborLoader` ([#5763](https://github.com/pyg-team/pytorch_geometric/pull/5763)) -- Added `disjoint` mode for temporal `LinkNeighborLoader` ([#5717](https://github.com/pyg-team/pytorch_geometric/pull/5717)) -- Added `HeteroData` support for `transforms.Constant` ([#5700](https://github.com/pyg-team/pytorch_geometric/pull/5700)) -- Added `np.memmap` support in `NeighborLoader` ([#5696](https://github.com/pyg-team/pytorch_geometric/pull/5696)) -- Added `assortativity` that computes degree assortativity coefficient ([#5587](https://github.com/pyg-team/pytorch_geometric/pull/5587)) -- Added `SSGConv` layer ([#5599](https://github.com/pyg-team/pytorch_geometric/pull/5599)) -- Added `shuffle_node`, `mask_feature` and `add_random_edge` augmentation methdos ([#5548](https://github.com/pyg-team/pytorch_geometric/pull/5548)) -- Added `dropout_path` augmentation that drops edges from a graph based on random walks ([#5531](https://github.com/pyg-team/pytorch_geometric/pull/5531)) -- Add support for filling labels with dummy values in `HeteroData.to_homogeneous()` ([#5540](https://github.com/pyg-team/pytorch_geometric/pull/5540)) -- Added `temporal_strategy` option to `neighbor_sample` ([#5576](https://github.com/pyg-team/pyg-lib/pull/5576)) -- Added `torch_geometric.sampler` package to docs ([#5563](https://github.com/pyg-team/pytorch_geometric/pull/5563)) -- Added the `DGraphFin` dynamic graph dataset ([#5504](https://github.com/pyg-team/pytorch_geometric/pull/5504)) -- Added `dropout_edge` augmentation that randomly drops edges from a graph - the usage of `dropout_adj` is now deprecated ([#5495](https://github.com/pyg-team/pytorch_geometric/pull/5495)) -- Added `dropout_node` augmentation that randomly drops nodes from a graph ([#5481](https://github.com/pyg-team/pytorch_geometric/pull/5481)) -- Added `AddRandomMetaPaths` that adds edges based on random walks along a metapath ([#5397](https://github.com/pyg-team/pytorch_geometric/pull/5397)) -- Added `WLConvContinuous` for performing WL refinement with continuous attributes ([#5316](https://github.com/pyg-team/pytorch_geometric/pull/5316)) -- Added `print_summary` method for the `torch_geometric.data.Dataset` interface ([#5438](https://github.com/pyg-team/pytorch_geometric/pull/5438)) -- Added `sampler` support to `LightningDataModule` ([#5456](https://github.com/pyg-team/pytorch_geometric/pull/5456), [#5457](https://github.com/pyg-team/pytorch_geometric/pull/5457)) -- Added official 
splits to `MalNetTiny` dataset ([#5078](https://github.com/pyg-team/pytorch_geometric/pull/5078)) -- Added `IndexToMask` and `MaskToIndex` transforms ([#5375](https://github.com/pyg-team/pytorch_geometric/pull/5375), [#5455](https://github.com/pyg-team/pytorch_geometric/pull/5455)) -- Added `FeaturePropagation` transform ([#5387](https://github.com/pyg-team/pytorch_geometric/pull/5387)) -- Added `PositionalEncoding` ([#5381](https://github.com/pyg-team/pytorch_geometric/pull/5381)) -- Consolidated sampler routines behind `torch_geometric.sampler`, enabling ease of extensibility in the future ([#5312](https://github.com/pyg-team/pytorch_geometric/pull/5312), [#5365](https://github.com/pyg-team/pytorch_geometric/pull/5365), [#5402](https://github.com/pyg-team/pytorch_geometric/pull/5402), [#5404](https://github.com/pyg-team/pytorch_geometric/pull/5404), [#5418](https://github.com/pyg-team/pytorch_geometric/pull/5418)) -- Added `pyg-lib` neighbor sampling ([#5384](https://github.com/pyg-team/pytorch_geometric/pull/5384), [#5388](https://github.com/pyg-team/pytorch_geometric/pull/5388)) -- Added `pyg_lib.segment_matmul` integration within `HeteroLinear` ([#5330](https://github.com/pyg-team/pytorch_geometric/pull/5330), [#5347](https://github.com/pyg-team/pytorch_geometric/pull/5347)) -- Enabled `bf16` support in benchmark scripts ([#5293](https://github.com/pyg-team/pytorch_geometric/pull/5293), [#5341](https://github.com/pyg-team/pytorch_geometric/pull/5341)) -- Added `Aggregation.set_validate_args` option to skip validation of `dim_size` ([#5290](https://github.com/pyg-team/pytorch_geometric/pull/5290)) -- Added `SparseTensor` support to inference and training benchmark suite ([#5242](https://github.com/pyg-team/pytorch_geometric/pull/5242), [#5258](https://github.com/pyg-team/pytorch_geometric/pull/5258), [#5881](https://github.com/pyg-team/pytorch_geometric/pull/5881)) -- Added experimental mode in inference benchmarks ([#5254](https://github.com/pyg-team/pytorch_geometric/pull/5254)) -- Added node classification example instrumented with [Weights and Biases (W&B) logging](https://wandb.com) and [W&B Sweeps](https://wandb.com/sweeps) ([#5192](https://github.com/pyg-team/pytorch_geometric/pull/5192)) -- Added experimental mode for `utils.scatter` ([#5232](https://github.com/pyg-team/pytorch_geometric/pull/5232), [#5241](https://github.com/pyg-team/pytorch_geometric/pull/5241), [#5386](https://github.com/pyg-team/pytorch_geometric/pull/5386)) -- Added missing test labels in `HGBDataset` ([#5233](https://github.com/pyg-team/pytorch_geometric/pull/5233)) -- Added `BaseStorage.get()` functionality ([#5240](https://github.com/pyg-team/pytorch_geometric/pull/5240)) -- Added a test to confirm that `to_hetero` works with `SparseTensor` ([#5222](https://github.com/pyg-team/pytorch_geometric/pull/5222)) -- Added `torch_geometric.explain` module with base functionality for explainability methods ([#5804](https://github.com/pyg-team/pytorch_geometric/pull/5804), [#6054](https://github.com/pyg-team/pytorch_geometric/pull/6054), [#6089](https://github.com/pyg-team/pytorch_geometric/pull/6089)) - -### Changed - -- Moved and adapted `GNNExplainer` from `torch_geometric.nn` to `torch_geometric.explain.algorithm` ([#5967](https://github.com/pyg-team/pytorch_geometric/pull/5967), [#6065](https://github.com/pyg-team/pytorch_geometric/pull/6065)) -- Optimized scatter implementations for CPU/GPU, both with and without backward computation ([#6051](https://github.com/pyg-team/pytorch_geometric/pull/6051), 
[#6052](https://github.com/pyg-team/pytorch_geometric/pull/6052)) -- Support temperature value in `dense_mincut_pool` ([#5908](https://github.com/pyg-team/pytorch_geometric/pull/5908)) -- Fixed a bug in which `VirtualNode` mistakenly treated node features as edge features ([#5819](https://github.com/pyg-team/pytorch_geometric/pull/5819)) -- Fixed `setter` and `getter` handling in `BaseStorage` ([#5815](https://github.com/pyg-team/pytorch_geometric/pull/5815)) -- Fixed `path` in `hetero_conv_dblp.py` example ([#5686](https://github.com/pyg-team/pytorch_geometric/pull/5686)) -- Fix `auto_select_device` routine in GraphGym for PyTorch Lightning>=1.7 ([#5677](https://github.com/pyg-team/pytorch_geometric/pull/5677)) -- Support `in_channels` with `tuple` in `GENConv` for bipartite message passing ([#5627](https://github.com/pyg-team/pytorch_geometric/pull/5627), [#5641](https://github.com/pyg-team/pytorch_geometric/pull/5641)) -- Handle cases of not having enough possible negative edges in `RandomLinkSplit` ([#5642](https://github.com/pyg-team/pytorch_geometric/pull/5642)) -- Fix `RGCN+pyg-lib` for `LongTensor` input ([#5610](https://github.com/pyg-team/pytorch_geometric/pull/5610)) -- Improved type hint support ([#5842](https://github.com/pyg-team/pytorch_geometric/pull/5842), [#5603](https://github.com/pyg-team/pytorch_geometric/pull/5603), [#5659](https://github.com/pyg-team/pytorch_geometric/pull/5659), [#5664](https://github.com/pyg-team/pytorch_geometric/pull/5664), [#5665](https://github.com/pyg-team/pytorch_geometric/pull/5665), [#5666](https://github.com/pyg-team/pytorch_geometric/pull/5666), [#5667](https://github.com/pyg-team/pytorch_geometric/pull/5667), [#5668](https://github.com/pyg-team/pytorch_geometric/pull/5668), [#5669](https://github.com/pyg-team/pytorch_geometric/pull/5669), [#5673](https://github.com/pyg-team/pytorch_geometric/pull/5673), [#5675](https://github.com/pyg-team/pytorch_geometric/pull/5675), [#5673](https://github.com/pyg-team/pytorch_geometric/pull/5676), [#5678](https://github.com/pyg-team/pytorch_geometric/pull/5678), [#5682](https://github.com/pyg-team/pytorch_geometric/pull/5682), [#5683](https://github.com/pyg-team/pytorch_geometric/pull/5683), [#5684](https://github.com/pyg-team/pytorch_geometric/pull/5684), [#5685](https://github.com/pyg-team/pytorch_geometric/pull/5685), [#5687](https://github.com/pyg-team/pytorch_geometric/pull/5687), [#5688](https://github.com/pyg-team/pytorch_geometric/pull/5688), [#5695](https://github.com/pyg-team/pytorch_geometric/pull/5695), [#5699](https://github.com/pyg-team/pytorch_geometric/pull/5699), [#5701](https://github.com/pyg-team/pytorch_geometric/pull/5701), [#5702](https://github.com/pyg-team/pytorch_geometric/pull/5702), [#5703](https://github.com/pyg-team/pytorch_geometric/pull/5703), [#5706](https://github.com/pyg-team/pytorch_geometric/pull/5706), [#5707](https://github.com/pyg-team/pytorch_geometric/pull/5707), [#5710](https://github.com/pyg-team/pytorch_geometric/pull/5710), [#5714](https://github.com/pyg-team/pytorch_geometric/pull/5714), [#5715](https://github.com/pyg-team/pytorch_geometric/pull/5715), [#5716](https://github.com/pyg-team/pytorch_geometric/pull/5716), [#5722](https://github.com/pyg-team/pytorch_geometric/pull/5722), [#5724](https://github.com/pyg-team/pytorch_geometric/pull/5724), [#5725](https://github.com/pyg-team/pytorch_geometric/pull/5725), [#5726](https://github.com/pyg-team/pytorch_geometric/pull/5726), [#5729](https://github.com/pyg-team/pytorch_geometric/pull/5729), 
[#5730](https://github.com/pyg-team/pytorch_geometric/pull/5730), [#5731](https://github.com/pyg-team/pytorch_geometric/pull/5731), [#5732](https://github.com/pyg-team/pytorch_geometric/pull/5732), [#5733](https://github.com/pyg-team/pytorch_geometric/pull/5733), [#5743](https://github.com/pyg-team/pytorch_geometric/pull/5743), [#5734](https://github.com/pyg-team/pytorch_geometric/pull/5734), [#5735](https://github.com/pyg-team/pytorch_geometric/pull/5735), [#5736](https://github.com/pyg-team/pytorch_geometric/pull/5736), [#5737](https://github.com/pyg-team/pytorch_geometric/pull/5737), [#5738](https://github.com/pyg-team/pytorch_geometric/pull/5738), [#5747](https://github.com/pyg-team/pytorch_geometric/pull/5747), [#5752](https://github.com/pyg-team/pytorch_geometric/pull/5752), [#5753](https://github.com/pyg-team/pytorch_geometric/pull/5753), [#5754](https://github.com/pyg-team/pytorch_geometric/pull/5754), [#5756](https://github.com/pyg-team/pytorch_geometric/pull/5756), [#5757](https://github.com/pyg-team/pytorch_geometric/pull/5757), [#5758](https://github.com/pyg-team/pytorch_geometric/pull/5758), [#5760](https://github.com/pyg-team/pytorch_geometric/pull/5760), [#5766](https://github.com/pyg-team/pytorch_geometric/pull/5766), [#5767](https://github.com/pyg-team/pytorch_geometric/pull/5767), [#5768](https://github.com/pyg-team/pytorch_geometric/pull/5768), [#5781](https://github.com/pyg-team/pytorch_geometric/pull/5781), [#5778](https://github.com/pyg-team/pytorch_geometric/pull/5778), [#5797](https://github.com/pyg-team/pytorch_geometric/pull/5797), [#5798](https://github.com/pyg-team/pytorch_geometric/pull/5798), [#5799](https://github.com/pyg-team/pytorch_geometric/pull/5799), [#5800](https://github.com/pyg-team/pytorch_geometric/pull/5800), [#5806](https://github.com/pyg-team/pytorch_geometric/pull/5806), [#5810](https://github.com/pyg-team/pytorch_geometric/pull/5810), [#5811](https://github.com/pyg-team/pytorch_geometric/pull/5811), [#5828](https://github.com/pyg-team/pytorch_geometric/pull/5828), [#5847](https://github.com/pyg-team/pytorch_geometric/pull/5847), [#5851](https://github.com/pyg-team/pytorch_geometric/pull/5851), [#5852](https://github.com/pyg-team/pytorch_geometric/pull/5852)) -- Avoid modifying `mode_kwargs` in `MultiAggregation` ([#5601](https://github.com/pyg-team/pytorch_geometric/pull/5601)) -- Changed `BatchNorm` to allow for batches of size one during training ([#5530](https://github.com/pyg-team/pytorch_geometric/pull/5530), [#5614](https://github.com/pyg-team/pytorch_geometric/pull/5614)) -- Integrated better temporal sampling support by requiring that local neighborhoods are sorted according to time ([#5516](https://github.com/pyg-team/pytorch_geometric/issues/5516), [#5602](https://github.com/pyg-team/pytorch_geometric/issues/5602)) -- Fixed a bug when applying several scalers with `PNAConv` ([#5514](https://github.com/pyg-team/pytorch_geometric/issues/5514)) -- Allow `.` in `ParameterDict` key names ([#5494](https://github.com/pyg-team/pytorch_geometric/pull/5494)) -- Renamed `drop_unconnected_nodes` to `drop_unconnected_node_types` and `drop_orig_edges` to `drop_orig_edge_types` in `AddMetapaths` ([#5490](https://github.com/pyg-team/pytorch_geometric/pull/5490)) -- Improved `utils.scatter` performance by explicitly choosing better implementation for `add` and `mean` reduction ([#5399](https://github.com/pyg-team/pytorch_geometric/pull/5399)) -- Fix `to_dense_adj` with empty `edge_index` 
([#5476](https://github.com/pyg-team/pytorch_geometric/pull/5476)) -- The `AttentionalAggregation` module can now be applied to compute attention on a per-feature level ([#5449](https://github.com/pyg-team/pytorch_geometric/pull/5449)) -- Ensure equal lengths of `num_neighbors` across edge types in `NeighborLoader` ([#5444](https://github.com/pyg-team/pytorch_geometric/pull/5444)) -- Fixed a bug in `TUDataset` in which node features were wrongly constructed whenever `node_attributes` only held a single feature (_e.g._, in `PROTEINS`) ([#5441](https://github.com/pyg-team/pytorch_geometric/pull/5441)) -- Breaking change: removed `num_neighbors` as an attribute of loader ([#5404](https://github.com/pyg-team/pytorch_geometric/pull/5404)) -- `ASAPooling` is now jittable ([#5395](https://github.com/pyg-team/pytorch_geometric/pull/5395)) -- Updated unsupervised `GraphSAGE` example to leverage `LinkNeighborLoader` ([#5317](https://github.com/pyg-team/pytorch_geometric/pull/5317)) -- Replace in-place operations with out-of-place ones to align with `torch.scatter_reduce` API ([#5353](https://github.com/pyg-team/pytorch_geometric/pull/5353)) -- Breaking bugfix: `PointTransformerConv` now correctly uses `sum` aggregation ([#5332](https://github.com/pyg-team/pytorch_geometric/pull/5332)) -- Improve out-of-bounds error message in `MessagePassing` ([#5339](https://github.com/pyg-team/pytorch_geometric/pull/5339)) -- Allow file names of a `Dataset` to be specified as either property or method ([#5338](https://github.com/pyg-team/pytorch_geometric/pull/5338)) -- Fixed separating a list of `SparseTensor` within `InMemoryDataset` ([#5299](https://github.com/pyg-team/pytorch_geometric/pull/5299)) -- Improved name resolving of normalization layers ([#5277](https://github.com/pyg-team/pytorch_geometric/pull/5277)) -- Fail gracefully on `GLIBC` errors within `torch-spline-conv` ([#5276](https://github.com/pyg-team/pytorch_geometric/pull/5276)) -- Fixed `Dataset.num_classes` in case a `transform` modifies `data.y` ([#5274](https://github.com/pyg-team/pytorch_geometric/pull/5274)) -- Allow customization of the activation function within `PNAConv` ([#5262](https://github.com/pyg-team/pytorch_geometric/pull/5262)) -- Do not fill `InMemoryDataset` cache on `dataset.num_features` ([#5264](https://github.com/pyg-team/pytorch_geometric/pull/5264)) -- Changed tests relying on `dblp` datasets to instead use synthetic data ([#5250](https://github.com/pyg-team/pytorch_geometric/pull/5250)) -- Fixed a bug for the initialization of activation function examples in `custom_graphgym` ([#5243](https://github.com/pyg-team/pytorch_geometric/pull/5243)) -- Allow any integer tensors when checking edge_index input to message passing ([#5281](https://github.com/pyg-team/pytorch_geometric/pull/5281)) - -### Removed - -- Removed `scatter_reduce` option from experimental mode ([#5399](https://github.com/pyg-team/pytorch_geometric/pull/5399)) - -## [2.1.0] - 2022-08-17 - -### Added - -- Added the test for `DeepGCNLayer` ([#5704](https://github.com/pyg-team/pytorch_geometric/pull/5704)) -- Allow `.` in `ModuleDict` key names ([#5227](https://github.com/pyg-team/pytorch_geometric/pull/5227)) -- Added `edge_label_time` argument to `LinkNeighborLoader` ([#5137](https://github.com/pyg-team/pytorch_geometric/pull/5137), [#5173](https://github.com/pyg-team/pytorch_geometric/pull/5173)) -- Let `ImbalancedSampler` accept `torch.Tensor` as input ([#5138](https://github.com/pyg-team/pytorch_geometric/pull/5138)) -- Added `flow` argument to `gcn_norm` 
to correctly normalize the adjacency matrix in `GCNConv` ([#5149](https://github.com/pyg-team/pytorch_geometric/pull/5149)) -- `NeighborSampler` supports graphs without edges ([#5072](https://github.com/pyg-team/pytorch_geometric/pull/5072)) -- Added the `MeanSubtractionNorm` layer ([#5068](https://github.com/pyg-team/pytorch_geometric/pull/5068)) -- Added `pyg_lib.segment_matmul` integration within `RGCNConv` ([#5052](https://github.com/pyg-team/pytorch_geometric/pull/5052), [#5096](https://github.com/pyg-team/pytorch_geometric/pull/5096)) -- Support `SparseTensor` as edge label in `LightGCN` ([#5046](https://github.com/pyg-team/pytorch_geometric/issues/5046)) -- Added support for `BasicGNN` models within `to_hetero` ([#5091](https://github.com/pyg-team/pytorch_geometric/pull/5091)) -- Added support for computing weighted metapaths in `AddMetapaths` ([#5049](https://github.com/pyg-team/pytorch_geometric/pull/5049)) -- Added inference benchmark suite ([#4915](https://github.com/pyg-team/pytorch_geometric/pull/4915)) -- Added a dynamically sized batch sampler for filling a mini-batch with a variable number of samples up to a maximum size ([#4972](https://github.com/pyg-team/pytorch_geometric/pull/4972)) -- Added fine-grained options for setting `bias` and `dropout` per layer in the `MLP` model ([#4981](https://github.com/pyg-team/pytorch_geometric/pull/4981)) -- Added `EdgeCNN` model ([#4991](https://github.com/pyg-team/pytorch_geometric/pull/4991)) -- Added scalable `inference` mode in `BasicGNN` with layer-wise neighbor loading ([#4977](https://github.com/pyg-team/pytorch_geometric/pull/4977)) -- Added inference benchmarks ([#4892](https://github.com/pyg-team/pytorch_geometric/pull/4892), [#5107](https://github.com/pyg-team/pytorch_geometric/pull/5107)) -- Added PyTorch 1.12 support ([#4975](https://github.com/pyg-team/pytorch_geometric/pull/4975)) -- Added `unbatch_edge_index` functionality for splitting an `edge_index` tensor according to a `batch` vector ([#4903](https://github.com/pyg-team/pytorch_geometric/pull/4903)) -- Added node-wise normalization mode in `LayerNorm` ([#4944](https://github.com/pyg-team/pytorch_geometric/pull/4944)) -- Added support for `normalization_resolver` ([#4926](https://github.com/pyg-team/pytorch_geometric/pull/4926), [#4951](https://github.com/pyg-team/pytorch_geometric/pull/4951), [#4958](https://github.com/pyg-team/pytorch_geometric/pull/4958), [#4959](https://github.com/pyg-team/pytorch_geometric/pull/4959)) -- Added notebook tutorial for `torch_geometric.nn.aggr` package to documentation ([#4927](https://github.com/pyg-team/pytorch_geometric/pull/4927)) -- Added support for `follow_batch` for lists or dictionaries of tensors ([#4837](https://github.com/pyg-team/pytorch_geometric/pull/4837)) -- Added `Data.validate()` and `HeteroData.validate()` functionality ([#4885](https://github.com/pyg-team/pytorch_geometric/pull/4885)) -- Added `LinkNeighborLoader` support to `LightningDataModule` ([#4868](https://github.com/pyg-team/pytorch_geometric/pull/4868)) -- Added `predict()` support to the `LightningNodeData` module ([#4884](https://github.com/pyg-team/pytorch_geometric/pull/4884)) -- Added `time_attr` argument to `LinkNeighborLoader` ([#4877](https://github.com/pyg-team/pytorch_geometric/pull/4877), [#4908](https://github.com/pyg-team/pytorch_geometric/pull/4908)) -- Added a `filter_per_worker` argument to data loaders to allow filtering of data within sub-processes ([#4873](https://github.com/pyg-team/pytorch_geometric/pull/4873)) -- Added a 
`NeighborLoader` benchmark script ([#4815](https://github.com/pyg-team/pytorch_geometric/pull/4815), [#4862](https://github.com/pyg-team/pytorch_geometric/pull/4862/files)) -- Added support for `FeatureStore` and `GraphStore` in `NeighborLoader` ([#4817](https://github.com/pyg-team/pytorch_geometric/pull/4817), [#4851](https://github.com/pyg-team/pytorch_geometric/pull/4851), [#4854](https://github.com/pyg-team/pytorch_geometric/pull/4854), [#4856](https://github.com/pyg-team/pytorch_geometric/pull/4856), [#4857](https://github.com/pyg-team/pytorch_geometric/pull/4857), [#4882](https://github.com/pyg-team/pytorch_geometric/pull/4882), [#4883](https://github.com/pyg-team/pytorch_geometric/pull/4883), [#4929](https://github.com/pyg-team/pytorch_geometric/pull/4929), [#4922](https://github.com/pyg-team/pytorch_geometric/pull/4922), [#4962](https://github.com/pyg-team/pytorch_geometric/pull/4962), [#4968](https://github.com/pyg-team/pytorch_geometric/pull/4968), [#5037](https://github.com/pyg-team/pytorch_geometric/pull/5037), [#5088](https://github.com/pyg-team/pytorch_geometric/pull/5088), [#5270](https://github.com/pyg-team/pytorch_geometric/pull/5270), [#5307](https://github.com/pyg-team/pytorch_geometric/pull/5307), [#5318](https://github.com/pyg-team/pytorch_geometric/pull/5318)) -- Added a `normalize` parameter to `dense_diff_pool` ([#4847](https://github.com/pyg-team/pytorch_geometric/pull/4847)) -- Added `size=None` explanation to jittable `MessagePassing` modules in the documentation ([#4850](https://github.com/pyg-team/pytorch_geometric/pull/4850)) -- Added documentation to the `DataLoaderIterator` class ([#4838](https://github.com/pyg-team/pytorch_geometric/pull/4838)) -- Added `GraphStore` support to `Data` and `HeteroData` ([#4816](https://github.com/pyg-team/pytorch_geometric/pull/4816)) -- Added `FeatureStore` support to `Data` and `HeteroData` ([#4807](https://github.com/pyg-team/pytorch_geometric/pull/4807), [#4853](https://github.com/pyg-team/pytorch_geometric/pull/4853)) -- Added `FeatureStore` and `GraphStore` abstractions ([#4534](https://github.com/pyg-team/pytorch_geometric/pull/4534), [#4568](https://github.com/pyg-team/pytorch_geometric/pull/4568), [#5120](https://github.com/pyg-team/pytorch_geometric/pull/5120)) -- Added support for dense aggregations in `global_*_pool` ([#4827](https://github.com/pyg-team/pytorch_geometric/pull/4827)) -- Added Python version requirement ([#4825](https://github.com/pyg-team/pytorch_geometric/pull/4825)) -- Added TorchScript support to `JumpingKnowledge` module ([#4805](https://github.com/pyg-team/pytorch_geometric/pull/4805)) -- Added a `max_sample` argument to `AddMetapaths` in order to tackle very dense metapath edges ([#4750](https://github.com/pyg-team/pytorch_geometric/pull/4750)) -- Test `HANConv` with empty tensors ([#4756](https://github.com/pyg-team/pytorch_geometric/pull/4756), [#4841](https://github.com/pyg-team/pytorch_geometric/pull/4841)) -- Added the `bias` vector to the `GCN` model definition in the "Create Message Passing Networks" tutorial ([#4755](https://github.com/pyg-team/pytorch_geometric/pull/4755)) -- Added `transforms.RootedSubgraph` interface with two implementations: `RootedEgoNets` and `RootedRWSubgraph` ([#3926](https://github.com/pyg-team/pytorch_geometric/pull/3926)) -- Added `ptr` vectors for `follow_batch` attributes within `Batch.from_data_list` ([#4723](https://github.com/pyg-team/pytorch_geometric/pull/4723)) -- Added `torch_geometric.nn.aggr` package 
([#4687](https://github.com/pyg-team/pytorch_geometric/pull/4687), [#4721](https://github.com/pyg-team/pytorch_geometric/pull/4721), [#4731](https://github.com/pyg-team/pytorch_geometric/pull/4731), [#4762](https://github.com/pyg-team/pytorch_geometric/pull/4762), [#4749](https://github.com/pyg-team/pytorch_geometric/pull/4749), [#4779](https://github.com/pyg-team/pytorch_geometric/pull/4779), [#4863](https://github.com/pyg-team/pytorch_geometric/pull/4863), [#4864](https://github.com/pyg-team/pytorch_geometric/pull/4864), [#4865](https://github.com/pyg-team/pytorch_geometric/pull/4865), [#4866](https://github.com/pyg-team/pytorch_geometric/pull/4866), [#4872](https://github.com/pyg-team/pytorch_geometric/pull/4872), [#4934](https://github.com/pyg-team/pytorch_geometric/pull/4934), [#4935](https://github.com/pyg-team/pytorch_geometric/pull/4935), [#4957](https://github.com/pyg-team/pytorch_geometric/pull/4957), [#4973](https://github.com/pyg-team/pytorch_geometric/pull/4973), [#4986](https://github.com/pyg-team/pytorch_geometric/pull/4986), [#4995](https://github.com/pyg-team/pytorch_geometric/pull/4995), [#5000](https://github.com/pyg-team/pytorch_geometric/pull/5000), [#5034](https://github.com/pyg-team/pytorch_geometric/pull/5034), [#5036](https://github.com/pyg-team/pytorch_geometric/pull/5036), [#5039](https://github.com/pyg-team/pytorch_geometric/issues/5039), [#4522](https://github.com/pyg-team/pytorch_geometric/pull/4522), [#5033](https://github.com/pyg-team/pytorch_geometric/pull/5033), [#5085](https://github.com/pyg-team/pytorch_geometric/pull/5085), [#5097](https://github.com/pyg-team/pytorch_geometric/pull/5097), [#5099](https://github.com/pyg-team/pytorch_geometric/pull/5099), [#5104](https://github.com/pyg-team/pytorch_geometric/pull/5104), [#5113](https://github.com/pyg-team/pytorch_geometric/pull/5113), [#5130](https://github.com/pyg-team/pytorch_geometric/pull/5130), [#5098](https://github.com/pyg-team/pytorch_geometric/pull/5098), [#5191](https://github.com/pyg-team/pytorch_geometric/pull/5191)) -- Added the `DimeNet++` model ([#4432](https://github.com/pyg-team/pytorch_geometric/pull/4432), [#4699](https://github.com/pyg-team/pytorch_geometric/pull/4699), [#4700](https://github.com/pyg-team/pytorch_geometric/pull/4700), [#4800](https://github.com/pyg-team/pytorch_geometric/pull/4800)) -- Added an example of using PyG with PyTorch Ignite ([#4487](https://github.com/pyg-team/pytorch_geometric/pull/4487)) -- Added `GroupAddRev` module with support for reducing training GPU memory ([#4671](https://github.com/pyg-team/pytorch_geometric/pull/4671), [#4701](https://github.com/pyg-team/pytorch_geometric/pull/4701), [#4715](https://github.com/pyg-team/pytorch_geometric/pull/4715), [#4730](https://github.com/pyg-team/pytorch_geometric/pull/4730)) -- Added benchmarks via [`wandb`](https://wandb.ai/site) ([#4656](https://github.com/pyg-team/pytorch_geometric/pull/4656), [#4672](https://github.com/pyg-team/pytorch_geometric/pull/4672), [#4676](https://github.com/pyg-team/pytorch_geometric/pull/4676)) -- Added `unbatch` functionality ([#4628](https://github.com/pyg-team/pytorch_geometric/pull/4628)) -- Confirm that `to_hetero()` works with custom functions, _e.g._, `dropout_adj` ([#4653](https://github.com/pyg-team/pytorch_geometric/pull/4653)) -- Added the `MLP.plain_last=False` option ([#4652](https://github.com/pyg-team/pytorch_geometric/pull/4652)) -- Added a check in `HeteroConv` and `to_hetero()` to ensure that 
`MessagePassing.add_self_loops` is disabled ([#4647](https://github.com/pyg-team/pytorch_geometric/pull/4647)) -- Added `HeteroData.subgraph()`, `HeteroData.node_type_subgraph()` and `HeteroData.edge_type_subgraph()` support ([#4635](https://github.com/pyg-team/pytorch_geometric/pull/4635)) -- Added the `AQSOL` dataset ([#4626](https://github.com/pyg-team/pytorch_geometric/pull/4626)) -- Added `HeteroData.node_items()` and `HeteroData.edge_items()` functionality ([#4644](https://github.com/pyg-team/pytorch_geometric/pull/4644)) -- Added PyTorch Lightning support in GraphGym ([#4511](https://github.com/pyg-team/pytorch_geometric/pull/4511), [#4516](https://github.com/pyg-team/pytorch_geometric/pull/4516), [#4531](https://github.com/pyg-team/pytorch_geometric/pull/4531), [#4689](https://github.com/pyg-team/pytorch_geometric/pull/4689), [#4843](https://github.com/pyg-team/pytorch_geometric/pull/4843)) -- Added support for returning embeddings in `MLP` models ([#4625](https://github.com/pyg-team/pytorch_geometric/pull/4625)) -- Added faster initialization of `NeighborLoader` in case edge indices are already sorted (via `is_sorted=True`) ([#4620](https://github.com/pyg-team/pytorch_geometric/pull/4620), [#4702](https://github.com/pyg-team/pytorch_geometric/pull/4702)) -- Added `AddPositionalEncoding` transform ([#4521](https://github.com/pyg-team/pytorch_geometric/pull/4521)) -- Added `HeteroData.is_undirected()` support ([#4604](https://github.com/pyg-team/pytorch_geometric/pull/4604)) -- Added the `Genius` and `Wiki` datasets to `datasets.LINKXDataset` ([#4570](https://github.com/pyg-team/pytorch_geometric/pull/4570), [#4600](https://github.com/pyg-team/pytorch_geometric/pull/4600)) -- Added `nn.aggr.EquilibriumAggregation` implicit global layer ([#4522](https://github.com/pyg-team/pytorch_geometric/pull/4522)) -- Added support for graph-level outputs in `to_hetero` ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) -- Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) -- Added `HeteroData` support to the `RemoveIsolatedNodes` transform ([#4479](https://github.com/pyg-team/pytorch_geometric/pull/4479)) -- Added `HeteroData.num_features` functionality ([#4504](https://github.com/pyg-team/pytorch_geometric/pull/4504)) -- Added support for projecting features before propagation in `SAGEConv` ([#4437](https://github.com/pyg-team/pytorch_geometric/pull/4437)) -- Added `Geom-GCN` splits to the `Planetoid` datasets ([#4442](https://github.com/pyg-team/pytorch_geometric/pull/4442)) -- Added a `LinkNeighborLoader` for training scalable link prediction models ([#4396](https://github.com/pyg-team/pytorch_geometric/pull/4396), [#4439](https://github.com/pyg-team/pytorch_geometric/pull/4439), [#4441](https://github.com/pyg-team/pytorch_geometric/pull/4441), [#4446](https://github.com/pyg-team/pytorch_geometric/pull/4446), [#4508](https://github.com/pyg-team/pytorch_geometric/pull/4508), [#4509](https://github.com/pyg-team/pytorch_geometric/pull/4509)) -- Added an unsupervised `GraphSAGE` example on `PPI` ([#4416](https://github.com/pyg-team/pytorch_geometric/pull/4416)) -- Added support for `LSTM` aggregation in `SAGEConv` ([#4379](https://github.com/pyg-team/pytorch_geometric/pull/4379)) -- Added support for floating-point labels in `RandomLinkSplit` ([#4311](https://github.com/pyg-team/pytorch_geometric/pull/4311), [#4383](https://github.com/pyg-team/pytorch_geometric/pull/4383)) -- Added support for `torch.data` `DataPipes` 
([#4302](https://github.com/pyg-team/pytorch_geometric/pull/4302), [#4345](https://github.com/pyg-team/pytorch_geometric/pull/4345), [#4349](https://github.com/pyg-team/pytorch_geometric/pull/4349)) -- Added support for the `cosine` argument in the `KNNGraph`/`RadiusGraph` transforms ([#4344](https://github.com/pyg-team/pytorch_geometric/pull/4344)) -- Added support for graph-level attributes in `networkx` conversion ([#4343](https://github.com/pyg-team/pytorch_geometric/pull/4343)) -- Added support for renaming node types via `HeteroData.rename` ([#4329](https://github.com/pyg-team/pytorch_geometric/pull/4329)) -- Added an example to load a trained PyG model in C++ ([#4307](https://github.com/pyg-team/pytorch_geometric/pull/4307)) -- Added a `MessagePassing.explain_message` method to customize making explanations on messages ([#4278](https://github.com/pyg-team/pytorch_geometric/pull/4278), [#4448](https://github.com/pyg-team/pytorch_geometric/pull/4448)) -- Added support for `GATv2Conv` in the `nn.models.GAT` model ([#4357](https://github.com/pyg-team/pytorch_geometric/pull/4357)) -- Added `HeteroData.subgraph` functionality ([#4243](https://github.com/pyg-team/pytorch_geometric/pull/4243)) -- Added the `MaskLabel` module and a corresponding masked label propagation example ([#4197](https://github.com/pyg-team/pytorch_geometric/pull/4197)) -- Added temporal sampling support to `NeighborLoader` ([#4025](https://github.com/pyg-team/pytorch_geometric/pull/4025)) -- Added an example for unsupervised heterogeneous graph learning based on "Deep Multiplex Graph Infomax" ([#3189](https://github.com/pyg-team/pytorch_geometric/pull/3189)) - -### Changed - -- Changed docstring for `RandomLinkSplit` ([#5190](https://github.com/pyg-team/pytorch_geometric/issues/5190)) -- Switched to PyTorch `scatter_reduce` implementation - experimental feature ([#5120](https://github.com/pyg-team/pytorch_geometric/pull/5120)) -- Fixed `RGATConv` device mismatches for `f-scaled` mode ([#5187](https://github.com/pyg-team/pytorch_geometric/pull/5187)) -- Allow for multi-dimensional `edge_labels` in `LinkNeighborLoader` ([#5186](https://github.com/pyg-team/pytorch_geometric/pull/5186)) -- Fixed `GINEConv` bug with non-sequential input ([#5154](https://github.com/pyg-team/pytorch_geometric/pull/5154)) -- Improved error message ([#5095](https://github.com/pyg-team/pytorch_geometric/pull/5095)) -- Fixed `HGTLoader` bug which produced outputs with missing edge types ([#5067](https://github.com/pyg-team/pytorch_geometric/pull/5067)) -- Fixed dynamic inheritance issue in data batching ([#5051](https://github.com/pyg-team/pytorch_geometric/pull/5051)) -- Fixed `load_state_dict` in `Linear` with `strict=False` mode ([#5094](https://github.com/pyg-team/pytorch_geometric/pull/5094)) -- Fixed typo in `MaskLabel.ratio_mask` ([#5093](https://github.com/pyg-team/pytorch_geometric/pull/5093)) -- Fixed `data.num_node_features` computation for sparse matrices ([#5089](https://github.com/pyg-team/pytorch_geometric/pull/5089)) -- Fixed `torch.fx` bug with `torch_geometric.nn.aggr` package ([#5021](https://github.com/pyg-team/pytorch_geometric/pull/5021)) -- Fixed `GenConv` test ([#4993](https://github.com/pyg-team/pytorch_geometric/pull/4993)) -- Fixed packaging tests for Python 3.10 ([#4982](https://github.com/pyg-team/pytorch_geometric/pull/4982)) -- Changed `act_dict` (part of `graphgym`) to create individual instances instead of reusing the same ones everywhere ([#4978](https://github.com/pyg-team/pytorch_geometric/pull/4978)) -- Fixed issue where 
one-hot tensors were passed to `F.one_hot` ([#4970](https://github.com/pyg-team/pytorch_geometric/pull/4970)) -- Fixed `bool` arguments in `argparse` in `benchmark/` ([#4967](https://github.com/pyg-team/pytorch_geometric/pull/4967)) -- Fixed `BasicGNN` for `num_layers=1`, which now respects a desired number of `out_channels` ([#4943](https://github.com/pyg-team/pytorch_geometric/pull/4943)) -- `len(batch)` will now return the number of graphs inside the batch, not the number of attributes ([#4931](https://github.com/pyg-team/pytorch_geometric/pull/4931)) -- Fixed `data.subgraph` generation for 0-dim tensors ([#4932](https://github.com/pyg-team/pytorch_geometric/pull/4932)) -- Removed unnecessary inclusion of self-loops when sampling negative edges ([#4880](https://github.com/pyg-team/pytorch_geometric/pull/4880)) -- Fixed `InMemoryDataset` inferring wrong `len` for lists of tensors ([#4837](https://github.com/pyg-team/pytorch_geometric/pull/4837)) -- Fixed `Batch.separate` when using it for lists of tensors ([#4837](https://github.com/pyg-team/pytorch_geometric/pull/4837)) -- Corrected docstring for `SAGEConv` ([#4852](https://github.com/pyg-team/pytorch_geometric/pull/4852)) -- Fixed a bug in `TUDataset` where `pre_filter` was not applied whenever `pre_transform` was present -- Renamed `RandomTranslate` to `RandomJitter` - the usage of `RandomTranslate` is now deprecated ([#4828](https://github.com/pyg-team/pytorch_geometric/pull/4828)) -- Do not allow accessing edge types in `HeteroData` with two node types when there exist multiple relations between these types ([#4782](https://github.com/pyg-team/pytorch_geometric/pull/4782)) -- Allow `edge_type == rev_edge_type` argument in `RandomLinkSplit` ([#4757](https://github.com/pyg-team/pytorch_geometric/pull/4757), [#5221](https://github.com/pyg-team/pytorch_geometric/pull/5221)) -- Fixed a numerical instability in the `GeneralConv` and `neighbor_sample` tests ([#4754](https://github.com/pyg-team/pytorch_geometric/pull/4754)) -- Fixed a bug in `HANConv` in which destination node features rather than source node features were propagated ([#4753](https://github.com/pyg-team/pytorch_geometric/pull/4753)) -- Fixed versions of `checkout` and `setup-python` in CI ([#4751](https://github.com/pyg-team/pytorch_geometric/pull/4751)) -- Fixed `protobuf` version ([#4719](https://github.com/pyg-team/pytorch_geometric/pull/4719)) -- Fixed the ranking protocol bug in the RGCN link prediction example ([#4688](https://github.com/pyg-team/pytorch_geometric/pull/4688)) -- Math support in Markdown ([#4683](https://github.com/pyg-team/pytorch_geometric/pull/4683)) -- Allow for `setter` properties in `Data` ([#4682](https://github.com/pyg-team/pytorch_geometric/pull/4682), [#4686](https://github.com/pyg-team/pytorch_geometric/pull/4686)) -- Allow for optional `edge_weight` in `GCN2Conv` ([#4670](https://github.com/pyg-team/pytorch_geometric/pull/4670)) -- Fixed the interplay between `TUDataset` and `pre_transform` that modifies node features ([#4669](https://github.com/pyg-team/pytorch_geometric/pull/4669)) -- Make use of the `pyg_sphinx_theme` documentation template ([#4664](https://github.com/pyg-team/pyg-lib/pull/4664), [#4667](https://github.com/pyg-team/pyg-lib/pull/4667)) -- Refactored reading molecular positions from SDF files for QM9 datasets ([#4654](https://github.com/pyg-team/pytorch_geometric/pull/4654)) -- Fixed `MLP.jittable()` bug in case `return_emb=True` ([#4645](https://github.com/pyg-team/pytorch_geometric/pull/4645), 
[#4648](https://github.com/pyg-team/pytorch_geometric/pull/4648)) -- The generated node features of `StochasticBlockModelDataset` are now ordered with respect to their labels ([#4617](https://github.com/pyg-team/pytorch_geometric/pull/4617)) -- Fixed typos in the documentation ([#4616](https://github.com/pyg-team/pytorch_geometric/pull/4616), [#4824](https://github.com/pyg-team/pytorch_geometric/pull/4824), [#4895](https://github.com/pyg-team/pytorch_geometric/pull/4895), [#5161](https://github.com/pyg-team/pytorch_geometric/pull/5161)) -- The `bias` argument in `TAGConv` is now actually applied ([#4597](https://github.com/pyg-team/pytorch_geometric/pull/4597)) -- Fixed subclass behavior of `process` and `download` in `Dataset` ([#4586](https://github.com/pyg-team/pytorch_geometric/pull/4586)) -- Fixed filtering of attributes for loaders in case `__cat_dim__ != 0` ([#4629](https://github.com/pyg-team/pytorch_geometric/pull/4629)) -- Fixed `SparseTensor` support in `NeighborLoader` ([#4320](https://github.com/pyg-team/pytorch_geometric/pull/4320)) -- Fixed average degree handling in `PNAConv` ([#4312](https://github.com/pyg-team/pytorch_geometric/pull/4312)) -- Fixed a bug in `from_networkx` in case some attributes are PyTorch tensors ([#4486](https://github.com/pyg-team/pytorch_geometric/pull/4486)) -- Added a missing clamp in `DimeNet` ([#4506](https://github.com/pyg-team/pytorch_geometric/pull/4506), [#4562](https://github.com/pyg-team/pytorch_geometric/pull/4562)) -- Fixed the download link in `DBP15K` ([#4428](https://github.com/pyg-team/pytorch_geometric/pull/4428)) -- Fixed an autograd bug in `DimeNet` when resetting parameters ([#4424](https://github.com/pyg-team/pytorch_geometric/pull/4424)) -- Fixed bipartite message passing in case `flow="target_to_source"` ([#4418](https://github.com/pyg-team/pytorch_geometric/pull/4418)) -- Fixed a bug in which `num_nodes` was not properly updated in the `FixedPoints` transform ([#4394](https://github.com/pyg-team/pytorch_geometric/pull/4394)) -- PyTorch Lightning >= 1.6 support ([#4377](https://github.com/pyg-team/pytorch_geometric/pull/4377)) -- Fixed a bug in which `GATConv` was not jittable ([#4347](https://github.com/pyg-team/pytorch_geometric/pull/4347)) -- Fixed a bug in which the GraphGym config was not stored in each specific experiment directory ([#4338](https://github.com/pyg-team/pytorch_geometric/pull/4338)) -- Fixed a bug in which `nn.models.GAT` did not produce `out_channels`-many output channels ([#4299](https://github.com/pyg-team/pytorch_geometric/pull/4299)) -- Fixed mini-batching with empty lists as attributes ([#4293](https://github.com/pyg-team/pytorch_geometric/pull/4293)) -- Fixed a bug in which `GCNConv` could not be combined with `to_hetero` on heterogeneous graphs with one node type ([#4279](https://github.com/pyg-team/pytorch_geometric/pull/4279)) - -### Removed - -- Remove internal metrics in favor of `torchmetrics` ([#4287](https://github.com/pyg-team/pytorch_geometric/pull/4287)) diff --git a/pytorch_geometric-2.3.1/MANIFEST.in b/pytorch_geometric-2.3.1/MANIFEST.in deleted file mode 100644 index 34584b3..0000000 --- a/pytorch_geometric-2.3.1/MANIFEST.in +++ /dev/null @@ -1,9 +0,0 @@ -include README.md -include LICENSE - -recursive-include torch_geometric *.jinja - -recursive-exclude test * -recursive-exclude examples * -recursive-exclude docs * -recursive-exclude benchmark * diff --git a/pytorch_geometric-2.3.1/README.md b/pytorch_geometric-2.3.1/README.md deleted file mode 100644 index 22d6c63..0000000 --- 
a/pytorch_geometric-2.3.1/README.md +++ /dev/null @@ -1,462 +0,0 @@ -[pypi-image]: https://badge.fury.io/py/torch-geometric.svg -[pypi-url]: https://pypi.python.org/pypi/torch-geometric -[testing-image]: https://github.com/pyg-team/pytorch_geometric/actions/workflows/testing.yml/badge.svg -[testing-url]: https://github.com/pyg-team/pytorch_geometric/actions/workflows/testing.yml -[linting-image]: https://github.com/pyg-team/pytorch_geometric/actions/workflows/linting.yml/badge.svg -[linting-url]: https://github.com/pyg-team/pytorch_geometric/actions/workflows/linting.yml -[docs-image]: https://readthedocs.org/projects/pytorch-geometric/badge/?version=latest -[docs-url]: https://pytorch-geometric.readthedocs.io/en/latest -[coverage-image]: https://codecov.io/gh/pyg-team/pytorch_geometric/branch/master/graph/badge.svg -[coverage-url]: https://codecov.io/github/pyg-team/pytorch_geometric?branch=master -[contributing-image]: https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat -[contributing-url]: https://github.com/pyg-team/pytorch_geometric/blob/master/.github/CONTRIBUTING.md -[slack-image]: https://img.shields.io/badge/slack-pyg-brightgreen -[slack-url]: https://data.pyg.org/slack.html - -

- -

- --------------------------------------------------------------------------------- - -[![PyPI Version][pypi-image]][pypi-url] -[![Testing Status][testing-image]][testing-url] -[![Linting Status][linting-image]][linting-url] -[![Docs Status][docs-image]][docs-url] -[![Contributing][contributing-image]][contributing-url] -[![Slack][slack-image]][slack-url] - -**[Documentation](https://pytorch-geometric.readthedocs.io)** | **[Paper](https://arxiv.org/abs/1903.02428)** | **[Colab Notebooks and Video Tutorials](https://pytorch-geometric.readthedocs.io/en/latest/get_started/colabs.html)** | **[External Resources](https://pytorch-geometric.readthedocs.io/en/latest/external/resources.html)** | **[OGB Examples](https://github.com/snap-stanford/ogb/tree/master/examples)** - -**PyG** *(PyTorch Geometric)* is a library built upon [PyTorch](https://pytorch.org/) to easily write and train Graph Neural Networks (GNNs) for a wide range of applications related to structured data. - -It consists of various methods for deep learning on graphs and other irregular structures, also known as *[geometric deep learning](http://geometricdeeplearning.com/)*, from a variety of published papers. -In addition, it consists of easy-to-use mini-batch loaders for operating on many small and single giant graphs, [multi GPU-support](https://github.com/pyg-team/pytorch_geometric/tree/master/examples/multi_gpu), [`torch.compile`](https://pytorch-geometric.readthedocs.io/en/latest/tutorial/compile.html) support, [`DataPipe`](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/datapipe.py) support, a large number of common benchmark datasets (based on simple interfaces to create your own), the [GraphGym](https://pytorch-geometric.readthedocs.io/en/latest/advanced/graphgym.html) experiment manager, and helpful transforms, both for learning on arbitrary graphs as well as on 3D meshes or point clouds. - -**[Click here to join our Slack community!][slack-url]** - -

- -

- --------------------------------------------------------------------------------- - -* [Library Highlights](#library-highlights) -* [Quick Tour for New Users](#quick-tour-for-new-users) -* [Architecture Overview](#architecture-overview) -* [Implemented GNN Models](#implemented-gnn-models) -* [Installation](#installation) - -## Library Highlights - -Whether you are a machine learning researcher or first-time user of machine learning toolkits, here are some reasons to try out PyG for machine learning on graph-structured data. - -* **Easy-to-use and unified API**: - All it takes is 10-20 lines of code to get started with training a GNN model (see the next section for a [quick tour](#quick-tour-for-new-users)). - PyG is *PyTorch-on-the-rocks*: It utilizes a tensor-centric API and keeps design principles close to vanilla PyTorch. - If you are already familiar with PyTorch, utilizing PyG is straightforward. -* **Comprehensive and well-maintained GNN models**: - Most of the state-of-the-art Graph Neural Network architectures have been implemented by library developers or authors of research papers and are ready to be applied. -* **Great flexibility**: - Existing PyG models can easily be extended for conducting your own research with GNNs. - Making modifications to existing models or creating new architectures is simple, thanks to its easy-to-use message passing API, and a variety of operators and utility functions. -* **Large-scale real-world GNN models**: - We focus on the need of GNN applications in challenging real-world scenarios, and support learning on diverse types of graphs, including but not limited to: scalable GNNs for graphs with millions of nodes; dynamic GNNs for node predictions over time; heterogeneous GNNs with multiple node types and edge types. -* **GraphGym integration**: GraphGym lets users easily reproduce GNN experiments, is able to launch and analyze thousands of different GNN configurations, and is customizable by registering new modules to a GNN learning pipeline. - -## Quick Tour for New Users - -In this quick tour, we highlight the ease of creating and training a GNN model with only a few lines of code. - -### Train your own GNN model - -In the first glimpse of PyG, we implement the training of a GNN for classifying papers in a citation graph. -For this, we load the [Cora](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.datasets.Planetoid.html) dataset, and create a simple 2-layer GCN model using the pre-defined [`GCNConv`](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.GCNConv.html): - -```python -import torch -from torch import Tensor -from torch_geometric.nn import GCNConv -from torch_geometric.datasets import Planetoid - -dataset = Planetoid(root='.', name='Cora') - -class GCN(torch.nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels): - super().__init__() - self.conv1 = GCNConv(in_channels, hidden_channels) - self.conv2 = GCNConv(hidden_channels, out_channels) - - def forward(self, x: Tensor, edge_index: Tensor) -> Tensor: - # x: Node feature matrix of shape [num_nodes, in_channels] - # edge_index: Graph connectivity matrix of shape [2, num_edges] - x = self.conv1(x, edge_index).relu() - x = self.conv2(x, edge_index) - return x - -model = GCN(dataset.num_features, 16, dataset.num_classes) -``` - -
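Before wiring up the optimizer, it can help to sanity-check the loaded graph object. A minimal sketch (the shapes in the comment are indicative for Cora, not guaranteed output):

```python
data = dataset[0]  # Cora ships as a single graph.
print(data)
# e.g. Data(x=[2708, 1433], edge_index=[2, 10556], y=[2708],
#           train_mask=[2708], val_mask=[2708], test_mask=[2708])
print(data.num_nodes, data.num_edges, data.has_isolated_nodes())
```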
-We can now optimize the model in a training loop, similar to the standard PyTorch training procedure. - -```python -import torch.nn.functional as F - -data = dataset[0] -optimizer = torch.optim.Adam(model.parameters(), lr=0.01) - -for epoch in range(200): - pred = model(data.x, data.edge_index) - loss = F.cross_entropy(pred[data.train_mask], data.y[data.train_mask]) - - # Backpropagation - optimizer.zero_grad() - loss.backward() - optimizer.step() -``` -
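Once training has finished, a minimal evaluation sketch (assuming the standard `Planetoid` split masks from above; the linked example below shows the full procedure):

```python
model.eval()
with torch.no_grad():
    # Predicted class = argmax over the output logits:
    pred = model(data.x, data.edge_index).argmax(dim=-1)

correct = int((pred[data.test_mask] == data.y[data.test_mask]).sum())
acc = correct / int(data.test_mask.sum())
print(f'Test accuracy: {acc:.4f}')
```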
- -More information about evaluating final model performance can be found in the corresponding [example](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/gcn.py). - -### Create your own GNN layer - -In addition to the easy application of existing GNNs, PyG makes it simple to implement custom Graph Neural Networks (see [here](https://pytorch-geometric.readthedocs.io/en/latest/tutorial/create_gnn.html) for the accompanying tutorial). -For example, this is all it takes to implement the [edge convolutional layer](https://arxiv.org/abs/1801.07829) from Wang *et al.*: - -$$x_i^{\prime} ~ = ~ \max_{j \in \mathcal{N}(i)} ~ \textrm{MLP}_{\theta} \left( [ ~ x_i, ~ x_j - x_i ~ ] \right)$$ - -```python -import torch -from torch import Tensor -from torch.nn import Sequential, Linear, ReLU -from torch_geometric.nn import MessagePassing - -class EdgeConv(MessagePassing): - def __init__(self, in_channels, out_channels): - super().__init__(aggr="max") # "Max" aggregation. - self.mlp = Sequential( - Linear(2 * in_channels, out_channels), - ReLU(), - Linear(out_channels, out_channels), - ) - - def forward(self, x: Tensor, edge_index: Tensor) -> Tensor: - # x: Node feature matrix of shape [num_nodes, in_channels] - # edge_index: Graph connectivity matrix of shape [2, num_edges] - return self.propagate(edge_index, x=x) # shape [num_nodes, out_channels] - - def message(self, x_j: Tensor, x_i: Tensor) -> Tensor: - # x_j: Source node features of shape [num_edges, in_channels] - # x_i: Target node features of shape [num_edges, in_channels] - edge_features = torch.cat([x_i, x_j - x_i], dim=-1) - return self.mlp(edge_features) # shape [num_edges, out_channels] -``` - -### Manage experiments with GraphGym - -GraphGym allows you to manage and launch GNN experiments, using a highly modularized pipeline (see [here](https://pytorch-geometric.readthedocs.io/en/latest/advanced/graphgym.html) for the accompanying tutorial). - -``` -git clone https://github.com/pyg-team/pytorch_geometric.git -cd pytorch_geometric/graphgym -bash run_single.sh # run a single GNN experiment (node/edge/graph-level) -bash run_batch.sh # run a batch of GNN experiments, using different GNN designs/datasets/tasks -``` - -Users are highly encouraged to check out the [documentation](https://pytorch-geometric.readthedocs.io/en/latest), which contains additional tutorials on the essential functionalities of PyG, including data handling, creation of datasets and a full list of implemented methods, transforms, and datasets. -For a quick start, check out our [examples](https://github.com/pyg-team/pytorch_geometric/tree/master/examples) in `examples/`. - -## Architecture Overview - -PyG provides a multi-layer framework that enables users to build Graph Neural Network solutions on both low and high levels. -It comprises the following components: - -* The PyG **engine** utilizes the powerful PyTorch deep learning framework with full [`torch.compile`](https://pytorch-geometric.readthedocs.io/en/latest/tutorial/compile.html) and [TorchScript](https://pytorch-geometric.readthedocs.io/en/latest/advanced/jit.html) support, as well as additions of efficient CPU/CUDA libraries for operating on sparse data, *e.g.*, [`pyg-lib`](https://github.com/pyg-team/pyg-lib). -* The PyG **storage** handles data processing, transformation and loading pipelines. It is capable of handling and processing large-scale graph datasets, and provides effective solutions for heterogeneous graphs. 
It further provides a variety of sampling solutions, which enable training of GNNs on large-scale graphs. -* The PyG **operators** bundle essential functionalities for implementing Graph Neural Networks. PyG supports important GNN building blocks that can be combined and applied to various parts of a GNN model, ensuring rich flexibility of GNN design. -* Finally, PyG provides an abundant set of GNN **models**, and examples that showcase GNN models on standard graph benchmarks (see the short sketch after this list). Thanks to its flexibility, users can easily build and modify custom GNN models to fit their specific needs. - 
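As a rough illustration of the **models** layer, the sketch below instantiates the pre-built `GCN` model from `torch_geometric.nn` instead of hand-writing the layers; the hyperparameters here are illustrative, reusing the `Planetoid` dataset from the quick tour:

```python
from torch_geometric.datasets import Planetoid
from torch_geometric.nn import GCN  # pre-built model from the models layer

dataset = Planetoid(root='.', name='Cora')

# Equivalent in spirit to the hand-written two-layer GCN from the quick tour:
model = GCN(
    in_channels=dataset.num_features,
    hidden_channels=16,
    num_layers=2,
    out_channels=dataset.num_classes,
)
```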

- -

- -## Implemented GNN Models - -We list currently supported PyG models, layers and operators according to category: - -**GNN layers:** -All Graph Neural Network layers are implemented via the **[`nn.MessagePassing`](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.MessagePassing.html)** interface. -A GNN layer specifies how to perform message passing, *i.e.* by designing different message, aggregation and update functions as defined [here](https://pytorch-geometric.readthedocs.io/en/latest/tutorial/create_gnn.html). -These GNN layers can be stacked together to create Graph Neural Network models. - -* **[GCNConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.GCNConv.html)** from Kipf and Welling: [Semi-Supervised Classification with Graph Convolutional Networks](https://arxiv.org/abs/1609.02907) (ICLR 2017) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/gcn.py)] -* **[ChebConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.ChebConv.html)** from Defferrard *et al.*: [Convolutional Neural Networks on Graphs with Fast Localized Spectral Filtering](https://arxiv.org/abs/1606.09375) (NIPS 2016) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/gcn.py#L36-L37)] -* **[GATConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.GATConv.html)** from Veličković *et al.*: [Graph Attention Networks](https://arxiv.org/abs/1710.10903) (ICLR 2018) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/gat.py)] - -
-Expand to see all implemented GNN layers... - -* **[GCN2Conv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.GCN2Conv.html)** from Chen *et al.*: [Simple and Deep Graph Convolutional Networks](https://arxiv.org/abs/2007.02133) (ICML 2020) [[**Example1**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/gcn2_cora.py), [**Example2**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/gcn2_ppi.py)] -* **[SplineConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.SplineConv.html)** from Fey *et al.*: [SplineCNN: Fast Geometric Deep Learning with Continuous B-Spline Kernels](https://arxiv.org/abs/1711.08920) (CVPR 2018) [[**Example1**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/cora.py), [**Example2**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/faust.py)] -* **[NNConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.NNConv.html)** from Gilmer *et al.*: [Neural Message Passing for Quantum Chemistry](https://arxiv.org/abs/1704.01212) (ICML 2017) [[**Example1**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/qm9_nn_conv.py), [**Example2**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/mnist_nn_conv.py)] -* **[CGConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.CGConv.html)** from Xie and Grossman: [Crystal Graph Convolutional Neural Networks for an Accurate and Interpretable Prediction of Material Properties](https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.120.145301) (Physical Review Letters 120, 2018) -* **[ECConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.ECConv.html)** from Simonovsky and Komodakis: [Edge-Conditioned Convolution on Graphs](https://arxiv.org/abs/1704.02901) (CVPR 2017) -* **[EGConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.EGConv.html)** from Tailor *et al.*: [Adaptive Filters and Aggregator Fusion for Efficient Graph Convolutions](https://arxiv.org/abs/2104.01481) (GNNSys 2021) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/egc.py)] -* **[GATv2Conv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.GATv2Conv.html)** from Brody *et al.*: [How Attentive are Graph Attention Networks?](https://arxiv.org/abs/2105.14491) (ICLR 2022) -* **[TransformerConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.TransformerConv.html)** from Shi *et al.*: [Masked Label Prediction: Unified Message Passing Model for Semi-Supervised Classification](https://arxiv.org/abs/2009.03509) (CoRR 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/unimp_arxiv.py)] -* **[SAGEConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.SAGEConv.html)** from Hamilton *et al.*: [Inductive Representation Learning on Large Graphs](https://arxiv.org/abs/1706.02216) (NIPS 2017) [[**Example1**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/reddit.py), [**Example2**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/ogbn_products_sage.py), [**Example3**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/graph_sage_unsup.py), 
[**Example4**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/graph_sage_unsup_ppi.py)] -* **[GraphConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.GraphConv.html)** from, *e.g.*, Morris *et al.*: [Weisfeiler and Leman Go Neural: Higher-order Graph Neural Networks](https://arxiv.org/abs/1810.02244) (AAAI 2019) -* **[GatedGraphConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.GatedGraphConv.html)** from Li *et al.*: [Gated Graph Sequence Neural Networks](https://arxiv.org/abs/1511.05493) (ICLR 2016) -* **[ResGatedGraphConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.ResGatedGraphConv.html)** from Bresson and Laurent: [Residual Gated Graph ConvNets](https://arxiv.org/abs/1711.07553) (CoRR 2017) -* **[GINConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.GINConv.html)** from Xu *et al.*: [How Powerful are Graph Neural Networks?](https://arxiv.org/abs/1810.00826) (ICLR 2019) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/mutag_gin.py)] -* **[GINEConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.GINEConv.html)** from Hu *et al.*: [Strategies for Pre-training Graph Neural Networks](https://arxiv.org/abs/1905.12265) (ICLR 2020) -* **[ARMAConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.ARMAConv.html)** from Bianchi *et al.*: [Graph Neural Networks with Convolutional ARMA Filters](https://arxiv.org/abs/1901.01343) (CoRR 2019) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/arma.py)] -* **[SGConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.SGConv.html)** from Wu *et al.*: [Simplifying Graph Convolutional Networks](https://arxiv.org/abs/1902.07153) (CoRR 2019) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/sgc.py)] -* **[APPNP](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.APPNP.html)** from Klicpera *et al.*: [Predict then Propagate: Graph Neural Networks meet Personalized PageRank](https://arxiv.org/abs/1810.05997) (ICLR 2019) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/benchmark/citation/appnp.py)] -* **[MFConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.MFConv.html)** from Duvenaud *et al.*: [Convolutional Networks on Graphs for Learning Molecular Fingerprints](https://arxiv.org/abs/1509.09292) (NIPS 2015) -* **[AGNNConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.AGNNConv.html)** from Thekumparampil *et al.*: [Attention-based Graph Neural Network for Semi-Supervised Learning](https://arxiv.org/abs/1803.03735) (CoRR 2017) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/agnn.py)] -* **[TAGConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.TAGConv.html)** from Du *et al.*: [Topology Adaptive Graph Convolutional Networks](https://arxiv.org/abs/1710.10370) (CoRR 2017) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/tagcn.py)] -* **[PNAConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.PNAConv.html)** from Corso *et al.*: [Principal Neighbourhood Aggregation for Graph 
Nets](https://arxiv.org/abs/2004.05718) (CoRR 2020) [**[Example](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/pna.py)**]
-* **[FAConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.FAConv.html)** from Bo *et al.*: [Beyond Low-Frequency Information in Graph Convolutional Networks](https://arxiv.org/abs/2101.00797) (AAAI 2021)
-* **[PDNConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.PDNConv.html)** from Rozemberczki *et al.*: [Pathfinder Discovery Networks for Neural Message Passing](https://arxiv.org/abs/2010.12878) (WWW 2021)
-* **[RGCNConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.RGCNConv.html)** from Schlichtkrull *et al.*: [Modeling Relational Data with Graph Convolutional Networks](https://arxiv.org/abs/1703.06103) (ESWC 2018) [[**Example1**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/rgcn.py), [**Example2**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/rgcn_link_pred.py)]
-* **[RGATConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.RGATConv.html)** from Busbridge *et al.*: [Relational Graph Attention Networks](https://arxiv.org/abs/1904.05811) (CoRR 2019) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/rgat.py)]
-* **[FiLMConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.FiLMConv.html)** from Brockschmidt: [GNN-FiLM: Graph Neural Networks with Feature-wise Linear Modulation](https://arxiv.org/abs/1906.12192) (ICML 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/film.py)]
-* **[SignedConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.SignedConv.html)** from Derr *et al.*: [Signed Graph Convolutional Network](https://arxiv.org/abs/1808.06354) (ICDM 2018) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/signed_gcn.py)]
-* **[DNAConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.DNAConv.html)** from Fey: [Just Jump: Dynamic Neighborhood Aggregation in Graph Neural Networks](https://arxiv.org/abs/1904.04849) (ICLR-W 2019) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/dna.py)]
-* **[PANConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.PANConv.html)** from Ma *et al.*: [Path Integral Based Convolution and Pooling for Graph Neural Networks](https://arxiv.org/abs/2006.16811) (NeurIPS 2020)
-* **[PointNetConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.PointNetConv.html)** (including **[Iterative Farthest Point Sampling](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.pool.fps.html)**, dynamic graph generation based on **[nearest neighbor](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.pool.knn_graph.html)** or **[maximum distance](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.pool.radius_graph.html)**, and **[k-NN interpolation](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.unpool.knn_interpolate.html)** for upsampling) from Qi *et al.*: [PointNet: Deep Learning on Point Sets for 3D Classification and Segmentation](https://arxiv.org/abs/1612.00593) (CVPR 2017) and [PointNet++: Deep Hierarchical Feature Learning on Point Sets in a Metric Space](https://arxiv.org/abs/1706.02413) (NIPS 2017) [[**Example1**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/pointnet2_classification.py), [**Example2**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/pointnet2_segmentation.py)]
-* **[EdgeConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.EdgeConv.html)** from Wang *et al.*: [Dynamic Graph CNN for Learning on Point Clouds](https://arxiv.org/abs/1801.07829) (CoRR 2018) [[**Example1**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/dgcnn_classification.py), [**Example2**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/dgcnn_segmentation.py)]
-* **[XConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.XConv.html)** from Li *et al.*: [PointCNN: Convolution On X-Transformed Points](https://arxiv.org/abs/1801.07791) (NeurIPS 2018) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/benchmark/points/point_cnn.py)]
-* **[PPFConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.PPFConv.html)** from Deng *et al.*: [PPFNet: Global Context Aware Local Features for Robust 3D Point Matching](https://arxiv.org/abs/1802.02669) (CVPR 2018)
-* **[GMMConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.GMMConv.html)** from Monti *et al.*: [Geometric Deep Learning on Graphs and Manifolds using Mixture Model CNNs](https://arxiv.org/abs/1611.08402) (CVPR 2017)
-* **[FeaStConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.FeaStConv.html)** from Verma *et al.*: [FeaStNet: Feature-Steered Graph Convolutions for 3D Shape Analysis](https://arxiv.org/abs/1706.05206) (CVPR 2018)
-* **[PointTransformerConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.PointTransformerConv.html)** from Zhao *et al.*: [Point Transformer](https://arxiv.org/abs/2012.09164) (2020)
-* **[HypergraphConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.HypergraphConv.html)** from Bai *et al.*: [Hypergraph Convolution and Hypergraph Attention](https://arxiv.org/abs/1901.08150) (CoRR 2019)
-* **[GravNetConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.GravNetConv.html)** from Qasim *et al.*: [Learning Representations of Irregular Particle-detector Geometry with Distance-weighted Graph Networks](https://arxiv.org/abs/1902.07987) (European Physics Journal C, 2019)
-* **[SuperGAT](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.SuperGATConv.html)** from Kim and Oh: [How To Find Your Friendly Neighborhood: Graph Attention Design With Self-Supervision](https://openreview.net/forum?id=Wi5KUNlqWty) (ICLR 2021) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/super_gat.py)]
-* **[HGTConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.HGTConv.html)** from Hu *et al.*: [Heterogeneous Graph Transformer](https://arxiv.org/abs/2003.01332) (WWW 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/hetero/hgt_dblp.py)]
-* **[HEATConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.HEATConv.html)** from Mo *et al.*: [Heterogeneous Edge-Enhanced Graph Attention Network For Multi-Agent Trajectory Prediction](https://arxiv.org/abs/2106.07161) (CoRR 2021)
-* **[SSGConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.SSGConv.html)** from Zhu *et al.*: [Simple Spectral Graph Convolution](https://openreview.net/forum?id=CYO5T-YjWZV) (ICLR 2021)
-* **[FusedGATConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.FusedGATConv.html)** from Zhang *et al.*: [Understanding GNN Computational Graph: A Coordinated Computation, IO, and Memory Perspective](https://proceedings.mlsys.org/paper/2022/file/9a1158154dfa42caddbd0694a4e9bdc8-Paper.pdf) (MLSys 2022)
-* **[GPSConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.GPSConv.html)** from Rampášek *et al.*: [Recipe for a General, Powerful, Scalable Graph Transformer](https://arxiv.org/abs/2205.12454) (NeurIPS 2022) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/graph_gps.py)]
-
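-Most of the convolutional layers above share the same call convention: they are constructed with input and output feature dimensionalities and called with a node feature matrix `x` and an `edge_index` connectivity tensor. As a minimal sketch (the graph and all sizes below are arbitrary toy values, not taken from the official examples):
-
-```
-import torch
-from torch_geometric.nn import GATv2Conv
-
-# A toy graph: 3 nodes with 8 features each and 4 directed edges.
-x = torch.randn(3, 8)
-edge_index = torch.tensor([[0, 1, 1, 2],
-                           [1, 0, 2, 1]])
-
-conv = GATv2Conv(in_channels=8, out_channels=16, heads=2)
-out = conv(x, edge_index)  # shape [3, 2 * 16]: attention heads are concatenated
-```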
-
-**Pooling layers:**
-Graph pooling layers combine the vectorial representations of a set of nodes in a graph (or a subgraph) into a single vector representation that summarizes the properties of its nodes.
-They are commonly applied to graph-level tasks, which require combining node features into a single graph representation (a short usage sketch follows the full list below).
-
-* **[Top-K Pooling](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.pool.TopKPooling.html)** from Gao and Ji: [Graph U-Nets](https://arxiv.org/abs/1905.05178) (ICML 2019), Cangea *et al.*: [Towards Sparse Hierarchical Graph Classifiers](https://arxiv.org/abs/1811.01287) (NeurIPS-W 2018) and Knyazev *et al.*: [Understanding Attention and Generalization in Graph Neural Networks](https://arxiv.org/abs/1905.02850) (ICLR-W 2019) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/proteins_topk_pool.py)]
-* **[DiffPool](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.dense.dense_diff_pool.html)** from Ying *et al.*: [Hierarchical Graph Representation Learning with Differentiable Pooling](https://arxiv.org/abs/1806.08804) (NeurIPS 2018) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/proteins_diff_pool.py)]
-
-Expand to see all implemented pooling layers... - -* **[Attentional Aggregation](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.aggr.AttentionalAggregation.html)** from Li *et al.*: [Graph Matching Networks for Learning the Similarity of Graph Structured Objects](https://arxiv.org/abs/1904.12787) (ICML 2019) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/benchmark/kernel/global_attention.py)] -* **[Set2Set](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.aggr.Set2Set.html)** from Vinyals *et al.*: [Order Matters: Sequence to Sequence for Sets](https://arxiv.org/abs/1511.06391) (ICLR 2016) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/benchmark/kernel/set2set.py)] -* **[Sort Aggregation](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.aggr.SortAggregation.html)** from Zhang *et al.*: [An End-to-End Deep Learning Architecture for Graph Classification](https://www.cse.wustl.edu/~muhan/papers/AAAI_2018_DGCNN.pdf) (AAAI 2018) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/benchmark/kernel/sort_pool.py)] -* **[MinCut Pooling](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.dense.dense_mincut_pool.html)** from Bianchi *et al.*: [Spectral Clustering with Graph Neural Networks for Graph Pooling](https://arxiv.org/abs/1907.00481) (ICML 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/proteins_mincut_pool.py)] -* **[DMoN Pooling](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.dense.DMoNPooling.html)** from Tsitsulin *et al.*: [Graph Clustering with Graph Neural Networks](https://arxiv.org/abs/2006.16904) (CoRR 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/proteins_dmon_pool.py)] -* **[Graclus Pooling](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.pool.graclus.html)** from Dhillon *et al.*: [Weighted Graph Cuts without Eigenvectors: A Multilevel Approach](http://www.cs.utexas.edu/users/inderjit/public_papers/multilevel_pami.pdf) (PAMI 2007) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/mnist_graclus.py)] -* **[Voxel Grid Pooling](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.pool.voxel_grid.html)** from, *e.g.*, Simonovsky and Komodakis: [Dynamic Edge-Conditioned Filters in Convolutional Neural Networks on Graphs](https://arxiv.org/abs/1704.02901) (CVPR 2017) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/mnist_voxel_grid.py)] -* **[SAG Pooling](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.pool.SAGPooling.html)** from Lee *et al.*: [Self-Attention Graph Pooling](https://arxiv.org/abs/1904.08082) (ICML 2019) and Knyazev *et al.*: [Understanding Attention and Generalization in Graph Neural Networks](https://arxiv.org/abs/1905.02850) (ICLR-W 2019) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/benchmark/kernel/sag_pool.py)] -* **[Edge Pooling](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.pool.EdgePooling.html)** from Diehl *et al.*: [Towards Graph Pooling by Edge Contraction](https://graphreason.github.io/papers/17.pdf) (ICML-W 2019) and Diehl: [Edge Contraction Pooling for Graph Neural Networks](https://arxiv.org/abs/1905.10990) (CoRR 2019) 
[[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/benchmark/kernel/edge_pool.py)]
-* **[ASAPooling](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.pool.ASAPooling.html)** from Ranjan *et al.*: [ASAP: Adaptive Structure Aware Pooling for Learning Hierarchical Graph Representations](https://arxiv.org/abs/1911.07979) (AAAI 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/benchmark/kernel/asap.py)]
-* **[PANPooling](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.pool.PANPooling.html)** from Ma *et al.*: [Path Integral Based Convolution and Pooling for Graph Neural Networks](https://arxiv.org/abs/2006.16811) (NeurIPS 2020)
-* **[MemPooling](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.pool.MemPooling.html)** from Khasahmadi *et al.*: [Memory-Based Graph Networks](https://arxiv.org/abs/2002.09518) (ICLR 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/mem_pool.py)]
-* **[Graph Multiset Transformer](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.aggr.GraphMultisetTransformer.html)** from Baek *et al.*: [Accurate Learning of Graph Representations with Graph Multiset Pooling](https://arxiv.org/abs/2102.11533) (ICLR 2021) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/proteins_gmt.py)]
-* **[Equilibrium Aggregation](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.aggr.EquilibriumAggregation.html)** from Bartunov *et al.*: [Equilibrium Aggregation: Encoding Sets via Optimization](https://arxiv.org/abs/2202.12795) (UAI 2022) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/equilibrium_median.py)]
-
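-As a rough sketch of how pooling composes with readout (the toy tensors below are made up): `TopKPooling` first drops nodes within each graph, and a readout such as `global_mean_pool` then collapses the remaining nodes into one vector per graph:
-
-```
-import torch
-from torch_geometric.nn import TopKPooling, global_mean_pool
-
-# A toy mini-batch holding two graphs with 16 features per node:
-x = torch.randn(4, 16)
-edge_index = torch.tensor([[0, 1, 2, 3],
-                           [1, 0, 3, 2]])
-batch = torch.tensor([0, 0, 1, 1])  # maps each node to its graph
-
-pool = TopKPooling(in_channels=16, ratio=0.5)  # keep the top 50% of nodes
-x, edge_index, _, batch, _, _ = pool(x, edge_index, batch=batch)
-
-graph_repr = global_mean_pool(x, batch)  # shape [2, 16]: one vector per graph
-```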
- -**GNN models:** -Our supported GNN models incorporate multiple message passing layers, and users can directly use these pre-defined models to make predictions on graphs. -Unlike simple stacking of GNN layers, these models could involve pre-processing, additional learnable parameters, skip connections, graph coarsening, etc. - -* **[SchNet](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.SchNet.html)** from Schütt *et al.*: [SchNet: A Continuous-filter Convolutional Neural Network for Modeling Quantum Interactions](https://arxiv.org/abs/1706.08566) (NIPS 2017) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/qm9_pretrained_schnet.py)] -* **[DimeNet](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.DimeNet.html)** and **[DimeNetPlusPlus](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.DimeNetPlusPlus.html)** from Klicpera *et al.*: [Directional Message Passing for Molecular Graphs](https://arxiv.org/abs/2003.03123) (ICLR 2020) and [Fast and Uncertainty-Aware Directional Message Passing for Non-Equilibrium Molecules](https://arxiv.org/abs/2011.14115) (NeurIPS-W 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/qm9_pretrained_dimenet.py)] -* **[Node2Vec](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.Node2Vec.html)** from Grover and Leskovec: [node2vec: Scalable Feature Learning for Networks](https://arxiv.org/abs/1607.00653) (KDD 2016) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/node2vec.py)] -* **[Deep Graph Infomax](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.DeepGraphInfomax.html)** from Veličković *et al.*: [Deep Graph Infomax](https://arxiv.org/abs/1809.10341) (ICLR 2019) [[**Example1**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/infomax_transductive.py), [**Example2**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/infomax_inductive.py)] -* **Deep Multiplex Graph Infomax** from Park *et al.*: [Unsupervised Attributed Multiplex Network Embedding](https://arxiv.org/abs/1911.06750) (AAAI 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/hetero/dmgi_unsup.py)] -* **[Masked Label Prediction](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.MaskLabel.html)** from Shi *et al.*: [Masked Label Prediction: Unified Message Passing Model for Semi-Supervised Classification](https://arxiv.org/abs/2009.03509) (CoRR 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/unimp_arxiv.py)] - -
-Expand to see all implemented GNN models... - -* **[Jumping Knowledge](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.JumpingKnowledge.html)** from Xu *et al.*: [Representation Learning on Graphs with Jumping Knowledge Networks](https://arxiv.org/abs/1806.03536) (ICML 2018) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/benchmark/kernel/gin.py#L54-L106)] -* A **[MetaLayer](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.MetaLayer.html)** for building any kind of graph network similar to the [TensorFlow Graph Nets library](https://github.com/deepmind/graph_nets) from Battaglia *et al.*: [Relational Inductive Biases, Deep Learning, and Graph Networks](https://arxiv.org/abs/1806.01261) (CoRR 2018) -* **[MetaPath2Vec](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.MetaPath2Vec.html)** from Dong *et al.*: [metapath2vec: Scalable Representation Learning for Heterogeneous Networks](https://ericdongyx.github.io/papers/KDD17-dong-chawla-swami-metapath2vec.pdf) (KDD 2017) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/hetero/metapath2vec.py)] -* All variants of **[Graph Autoencoders](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.GAE.html)** and **[Variational Autoencoders](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.VGAE.html)** from: - * [Variational Graph Auto-Encoders](https://arxiv.org/abs/1611.07308) from Kipf and Welling (NIPS-W 2016) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/autoencoder.py)] - * [Adversarially Regularized Graph Autoencoder for Graph Embedding](https://arxiv.org/abs/1802.04407) from Pan *et al.* (IJCAI 2018) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/argva_node_clustering.py)] - * [Simple and Effective Graph Autoencoders with One-Hop Linear Models](https://arxiv.org/abs/2001.07614) from Salha *et al.* (ECML 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/autoencoder.py)] -* **[SEAL](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/seal_link_pred.py)** from Zhang and Chen: [Link Prediction Based on Graph Neural Networks](https://arxiv.org/pdf/1802.09691.pdf) (NeurIPS 2018) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/seal_link_pred.py)] -* **[RENet](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.RENet.html)** from Jin *et al.*: [Recurrent Event Network for Reasoning over Temporal Knowledge Graphs](https://arxiv.org/abs/1904.05530) (ICLR-W 2019) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/renet.py)] -* **[GraphUNet](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.GraphUNet.html)** from Gao and Ji: [Graph U-Nets](https://arxiv.org/abs/1905.05178) (ICML 2019) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/graph_unet.py)] -* **[AttentiveFP](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.AttentiveFP.html)** from Xiong *et al.*: [Pushing the Boundaries of Molecular Representation for Drug Discovery with the Graph Attention Mechanism](https://pubs.acs.org/doi/10.1021/acs.jmedchem.9b00959) (J. Med. Chem. 
2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/attentive_fp.py)]
-* **[DeepGCN](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.DeepGCNLayer.html)** and the **[GENConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.GENConv.html)** from Li *et al.*: [DeepGCNs: Can GCNs Go as Deep as CNNs?](https://arxiv.org/abs/1904.03751) (ICCV 2019) and [DeeperGCN: All You Need to Train Deeper GCNs](https://arxiv.org/abs/2006.07739) (CoRR 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/ogbn_proteins_deepgcn.py)]
-* **[RECT](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.RECT_L.html)** from Wang *et al.*: [Network Embedding with Completely-imbalanced Labels](https://ieeexplore.ieee.org/document/8979355) (TKDE 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/rect.py)]
-* **[GNNExplainer](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.explain.algorithm.GNNExplainer.html)** from Ying *et al.*: [GNNExplainer: Generating Explanations for Graph Neural Networks](https://arxiv.org/abs/1903.03894) (NeurIPS 2019) [[**Example1**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/explain/gnn_explainer.py), [**Example2**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/explain/gnn_explainer_ba_shapes.py), [**Example3**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/explain/gnn_explainer_link_pred.py)]
-* **Graph-less Neural Networks** from Zhang *et al.*: [Graph-less Neural Networks: Teaching Old MLPs New Tricks via Distillation](https://arxiv.org/abs/2110.08727) (CoRR 2021) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/glnn.py)]
-* **[LINKX](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.LINKX.html)** from Lim *et al.*: [Large Scale Learning on Non-Homophilous Graphs: New Benchmarks and Strong Simple Methods](https://arxiv.org/abs/2110.14446) (NeurIPS 2021) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/linkx.py)]
-* **[RevGNN](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.GroupAddRev.html)** from Li *et al.*: [Training Graph Neural Networks with 1000 Layers](https://arxiv.org/abs/2106.07476) (ICML 2021) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/rev_gnn.py)]
-* **[TransE](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.kge.TransE.html)** from Bordes *et al.*: [Translating Embeddings for Modeling Multi-Relational Data](https://proceedings.neurips.cc/paper/2013/file/1cecc7a77928ca8133fa24680a88d2f9-Paper.pdf) (NIPS 2013) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/kge_fb15k_237.py)]
-* **[ComplEx](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.kge.ComplEx.html)** from Trouillon *et al.*: [Complex Embeddings for Simple Link Prediction](https://arxiv.org/abs/1606.06357) (ICML 2016) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/kge_fb15k_237.py)]
-* **[DistMult](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.kge.DistMult.html)** from Yang *et al.*: [Embedding Entities and Relations for Learning and Inference in Knowledge Bases](https://arxiv.org/abs/1412.6575) (ICLR 2015) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/kge_fb15k_237.py)]
-
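-As a quick illustration of how such a pre-defined model is used (a minimal sketch with made-up toy shapes): the basic `GCN` model from `torch_geometric.nn.models` stacks several message passing layers behind a single constructor:
-
-```
-import torch
-from torch_geometric.nn.models import GCN
-
-model = GCN(in_channels=16, hidden_channels=32, num_layers=2, out_channels=7)
-
-x = torch.randn(100, 16)                      # toy node features
-edge_index = torch.randint(0, 100, (2, 500))  # toy connectivity
-out = model(x, edge_index)                    # shape [100, 7]
-```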
-
-**GNN operators and utilities:**
-PyG comes with a rich set of neural network operators that are commonly used in many GNN models.
-They follow an extensible design: it is easy to apply these operators and graph utilities to existing GNN layers and models to further enhance model performance (a short usage sketch follows the full list below).
-
-* **[DropEdge](https://pytorch-geometric.readthedocs.io/en/latest/modules/utils.html#torch_geometric.utils.dropout_edge)** from Rong *et al.*: [DropEdge: Towards Deep Graph Convolutional Networks on Node Classification](https://openreview.net/forum?id=Hkx1qkrKPr) (ICLR 2020)
-* **[DropNode](https://pytorch-geometric.readthedocs.io/en/latest/modules/utils.html#torch_geometric.utils.dropout_node)**, **[MaskFeature](https://pytorch-geometric.readthedocs.io/en/latest/modules/utils.html#torch_geometric.utils.mask_feature)** and **[AddRandomEdge](https://pytorch-geometric.readthedocs.io/en/latest/modules/utils.html#torch_geometric.utils.add_random_edge)** from You *et al.*: [Graph Contrastive Learning with Augmentations](https://arxiv.org/abs/2010.13902) (NeurIPS 2020)
-* **[DropPath](https://pytorch-geometric.readthedocs.io/en/latest/modules/utils.html#torch_geometric.utils.dropout_path)** from Li *et al.*: [MaskGAE: Masked Graph Modeling Meets Graph Autoencoders](https://arxiv.org/abs/2205.10053) (arXiv 2022)
-* **[ShuffleNode](https://pytorch-geometric.readthedocs.io/en/latest/modules/utils.html#torch_geometric.utils.shuffle_node)** from Veličković *et al.*: [Deep Graph Infomax](https://arxiv.org/abs/1809.10341) (ICLR 2019)
-* **[GraphNorm](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.norm.GraphNorm.html)** from Cai *et al.*: [GraphNorm: A Principled Approach to Accelerating Graph Neural Network Training](https://proceedings.mlr.press/v139/cai21e.html) (ICML 2021)
-* **[GDC](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.transforms.GDC.html)** from Klicpera *et al.*: [Diffusion Improves Graph Learning](https://arxiv.org/abs/1911.05485) (NeurIPS 2019) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/gcn.py)]
-
-Expand to see all implemented GNN operators and utilities... - -* **[GraphSizeNorm](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.norm.GraphSizeNorm.html)** from Dwivedi *et al.*: [Benchmarking Graph Neural Networks](https://arxiv.org/abs/2003.00982) (CoRR 2020) -* **[PairNorm](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.norm.PairNorm.html)** from Zhao and Akoglu: [PairNorm: Tackling Oversmoothing in GNNs](https://arxiv.org/abs/1909.12223) (ICLR 2020) -* **[MeanSubtractionNorm](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.norm.MeanSubtractionNorm.html)** from Yang *et al.*: [Revisiting "Over-smoothing" in Deep GCNs](https://arxiv.org/abs/2003.13663) (CoRR 2020) -* **[DiffGroupNorm](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.norm.DiffGroupNorm.html)** from Zhou *et al.*: [Towards Deeper Graph Neural Networks with Differentiable Group Normalization](https://arxiv.org/abs/2006.06972) (NeurIPS 2020) -* **[Tree Decomposition](https://pytorch-geometric.readthedocs.io/en/latest/modules/utils.html#torch_geometric.utils.tree_decomposition)** from Jin *et al.*: [Junction Tree Variational Autoencoder for Molecular Graph Generation](https://arxiv.org/abs/1802.04364) (ICML 2018) -* **[TGN](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.TGNMemory.html)** from Rossi *et al.*: [Temporal Graph Networks for Deep Learning on Dynamic Graphs](https://arxiv.org/abs/2006.10637) (GRL+ 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/tgn.py)] -* **[Weisfeiler Lehman Operator](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.WLConv.html)** from Weisfeiler and Lehman: [A Reduction of a Graph to a Canonical Form and an Algebra Arising During this Reduction](https://www.iti.zcu.cz/wl2018/pdf/wl_paper_translation.pdf) (Nauchno-Technicheskaya Informatsia 1968) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/wl_kernel.py)] -* **[Continuous Weisfeiler Lehman Operator](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.WLConvContinuous.html)** from Togninalli *et al.*: [Wasserstein Weisfeiler-Lehman Graph Kernels](https://arxiv.org/abs/1906.01277) (NeurIPS 2019) -* **[Label Propagation](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.LabelPropagation.html)** from Zhu and Ghahramani: [Learning from Labeled and Unlabeled Data with Label Propagation](http://mlg.eng.cam.ac.uk/zoubin/papers/CMU-CALD-02-107.pdf) (CMU-CALD 2002) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/label_prop.py)] -* **[Local Degree Profile](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.transforms.LocalDegreeProfile)** from Cai and Wang: [A Simple yet Effective Baseline for Non-attribute Graph Classification](https://arxiv.org/abs/1811.03508) (CoRR 2018) -* **[CorrectAndSmooth](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.CorrectAndSmooth.html)** from Huang *et al.*: [Combining Label Propagation And Simple Models Out-performs Graph Neural Networks](https://arxiv.org/abs/2010.13993) (CoRR 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/correct_and_smooth.py)] -* 
**[Gini](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.functional.gini.html)** and **[BRO](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.functional.bro.html)** regularization from Henderson *et al.*: [Improving Molecular Graph Neural Network Explainability with Orthonormalization and Induced Sparsity](https://arxiv.org/abs/2105.04854) (ICML 2021) -* **[RootedEgoNets](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.transforms.RootedEgoNets)** and **[RootedRWSubgraph](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.transforms.RootedRWSubgraph)** from Zhao *et al.*: [From Stars to Subgraphs: Uplifting Any GNN with Local Structure Awareness](https://arxiv.org/abs/2110.03753) (ICLR 2022) -* **[FeaturePropagation](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.transforms.FeaturePropagation)** from Rossi *et al.*: [On the Unreasonable Effectiveness of Feature Propagation in Learning on Graphs with Missing Node Features](https://arxiv.org/abs/2111.12128) (CoRR 2021) -
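-For instance, the edge-level augmentations above are plain functions over `edge_index`; a minimal sketch with `dropout_edge` (the toy connectivity below is made up):
-
-```
-import torch
-from torch_geometric.utils import dropout_edge
-
-edge_index = torch.tensor([[0, 1, 1, 2, 2, 3],
-                           [1, 0, 2, 1, 3, 2]])
-
-# Randomly drop each edge with probability 0.5 (a no-op when training=False):
-edge_index, edge_mask = dropout_edge(edge_index, p=0.5, training=True)
-```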
-
-**Scalable GNNs:**
-PyG supports the implementation of Graph Neural Networks that can scale to large-scale graphs.
-Such applications are challenging, since the entire graph, its associated features, and the GNN parameters cannot fit into GPU memory.
-Many state-of-the-art scalability approaches tackle this challenge by sampling neighborhoods for mini-batch training, by graph clustering and partitioning, or by using simplified GNN models.
-These approaches have been implemented in PyG, and can benefit from the above GNN layers, operators and models (a short usage sketch follows the full list below).
-
-* **[NeighborLoader](https://pytorch-geometric.readthedocs.io/en/latest/modules/loader.html#torch_geometric.loader.NeighborLoader)** from Hamilton *et al.*: [Inductive Representation Learning on Large Graphs](https://arxiv.org/abs/1706.02216) (NIPS 2017) [[**Example1**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/reddit.py), [**Example2**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/ogbn_products_sage.py), [**Example3**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/ogbn_products_gat.py), [**Example4**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/hetero/to_hetero_mag.py)]
-* **[ClusterGCN](https://pytorch-geometric.readthedocs.io/en/latest/modules/loader.html#torch_geometric.loader.ClusterLoader)** from Chiang *et al.*: [Cluster-GCN: An Efficient Algorithm for Training Deep and Large Graph Convolutional Networks](https://arxiv.org/abs/1905.07953) (KDD 2019) [[**Example1**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/cluster_gcn_reddit.py), [**Example2**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/cluster_gcn_ppi.py)]
-* **[GraphSAINT](https://pytorch-geometric.readthedocs.io/en/latest/modules/loader.html#torch_geometric.loader.GraphSAINTSampler)** from Zeng *et al.*: [GraphSAINT: Graph Sampling Based Inductive Learning Method](https://arxiv.org/abs/1907.04931) (ICLR 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/graph_saint.py)]
-
-Expand to see all implemented scalable GNNs... - -* **[ShaDow](https://pytorch-geometric.readthedocs.io/en/latest/modules/loader.html#torch_geometric.loader.ShaDowKHopSampler)** from Zeng *et al.*: [Decoupling the Depth and Scope of Graph Neural Networks](https://arxiv.org/abs/2201.07858) (NeurIPS 2021) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/shadow.py)] -* **[SIGN](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.transforms.SIGN.html)** from Rossi *et al.*: [SIGN: Scalable Inception Graph Neural Networks](https://arxiv.org/abs/2004.11198) (CoRR 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/sign.py)] -* **[HGTLoader](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.loader.HGTLoader.html)** from Hu *et al.*: [Heterogeneous Graph Transformer](https://arxiv.org/abs/2003.01332) (WWW 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/hetero/to_hetero_mag.py)] -
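-A minimal sketch of mini-batch neighbor sampling with `NeighborLoader` (the small Cora dataset and its root path below are arbitrary stand-ins; any node-level dataset works):
-
-```
-from torch_geometric.datasets import Planetoid
-from torch_geometric.loader import NeighborLoader
-
-data = Planetoid(root='data/Cora', name='Cora')[0]
-
-# Sample at most 10 neighbors per node in the first hop and 5 in the second,
-# seeding each mini-batch from 128 training nodes:
-loader = NeighborLoader(data, num_neighbors=[10, 5], batch_size=128,
-                        input_nodes=data.train_mask)
-
-for batch in loader:
-    ...  # batch.x and batch.edge_index hold the sampled subgraph
-```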
-
-## Installation
-
-PyG is available for Python 3.7 to Python 3.11.
-
-### Anaconda
-
-You can now install PyG via [Anaconda](https://anaconda.org/pyg/pyg) for all major OS/PyTorch/CUDA combinations 🤗
-If you have not yet installed PyTorch, install it via `conda` as described in the [official PyTorch documentation](https://pytorch.org/get-started/locally/).
-Given that you have PyTorch installed (`>=1.8.0`), simply run
-
-```
-conda install pyg -c pyg
-```
-
-### PyPI
-
-From **PyG 2.3** onwards, you can install and use PyG **without any external library** required except for PyTorch.
-For this, simply run
-
-```
-pip install torch_geometric
-```
-
-### Additional Libraries
-
-If you want to utilize the full set of features from PyG, there exist several additional libraries you may want to install:
-
-* **[`pyg-lib`](https://github.com/pyg-team/pyg-lib)**: Heterogeneous GNN operators and graph sampling routines
-* **[`torch-scatter`](https://github.com/rusty1s/pytorch_scatter)**: Accelerated and efficient sparse reductions
-* **[`torch-sparse`](https://github.com/rusty1s/pytorch_sparse)**: [`SparseTensor`](https://pytorch-geometric.readthedocs.io/en/latest/advanced/sparse_tensor.html) support
-* **[`torch-cluster`](https://github.com/rusty1s/pytorch_cluster)**: Graph clustering routines
-* **[`torch-spline-conv`](https://github.com/rusty1s/pytorch_spline_conv)**: [`SplineConv`](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.SplineConv.html) support
-
-These packages come with their own CPU and GPU kernel implementations based on the [PyTorch C++/CUDA extension interface](https://github.com/pytorch/extension-cpp).
-For a basic usage of PyG, these dependencies are **fully optional**.
-We recommend starting with a minimal installation, and installing additional dependencies once you actually need them.
-
-For ease of installation of these extensions, we provide `pip` wheels for all major OS/PyTorch/CUDA combinations, see [here](https://data.pyg.org/whl).
-
-#### PyTorch 2.0
-
-To install the binaries for PyTorch 2.0.0, simply run
-
-```
-pip install pyg_lib torch_scatter torch_sparse torch_cluster torch_spline_conv -f https://data.pyg.org/whl/torch-2.0.0+${CUDA}.html
-```
-
-where `${CUDA}` should be replaced by either `cpu`, `cu117`, or `cu118` depending on your PyTorch installation.
-
-|             | `cpu` | `cu117` | `cu118` |
-|-------------|-------|---------|---------|
-| **Linux**   | ✅    | ✅      | ✅      |
-| **Windows** | ✅    | ✅      | ✅      |
-| **macOS**   | ✅    |         |         |
-
-#### PyTorch 1.13
-
-To install the binaries for PyTorch 1.13.0, simply run
-
-```
-pip install pyg_lib torch_scatter torch_sparse torch_cluster torch_spline_conv -f https://data.pyg.org/whl/torch-1.13.0+${CUDA}.html
-```
-
-where `${CUDA}` should be replaced by either `cpu`, `cu116`, or `cu117` depending on your PyTorch installation.
-
-|             | `cpu` | `cu116` | `cu117` |
-|-------------|-------|---------|---------|
-| **Linux**   | ✅    | ✅      | ✅      |
-| **Windows** | ✅    | ✅      | ✅      |
-| **macOS**   | ✅    |         |         |
-
-**Note:** Binaries of older versions are also provided for PyTorch 1.4.0, PyTorch 1.5.0, PyTorch 1.6.0, PyTorch 1.7.0/1.7.1, PyTorch 1.8.0/1.8.1, PyTorch 1.9.0, PyTorch 1.10.0/1.10.1/1.10.2, PyTorch 1.11.0 and PyTorch 1.12.0/1.12.1 (following the same procedure).
-**For older versions, you might need to explicitly specify the latest supported version number** or install via `pip install --no-index` in order to prevent a manual installation from source.
-You can look up the latest supported version number [here](https://data.pyg.org/whl). - -### Nightly and Master - -In case you want to experiment with the latest PyG features which are not fully released yet, either install the **nightly version** of PyG via - -``` -pip install pyg-nightly -``` - -or install PyG **from master** via - -``` -pip install git+https://github.com/pyg-team/pytorch_geometric.git -``` - -## Cite - -Please cite [our paper](https://arxiv.org/abs/1903.02428) (and the respective papers of the methods used) if you use this code in your own work: - -``` -@inproceedings{Fey/Lenssen/2019, - title={Fast Graph Representation Learning with {PyTorch Geometric}}, - author={Fey, Matthias and Lenssen, Jan E.}, - booktitle={ICLR Workshop on Representation Learning on Graphs and Manifolds}, - year={2019}, -} -``` - -Feel free to [email us](mailto:matthias.fey@tu-dortmund.de) if you wish your work to be listed in the [external resources](https://pytorch-geometric.readthedocs.io/en/latest/external/resources.html). -If you notice anything unexpected, please open an [issue](https://github.com/pyg-team/pytorch_geometric/issues) and let us know. -If you have any questions or are missing a specific feature, feel free [to discuss them with us](https://github.com/pyg-team/pytorch_geometric/discussions). -We are motivated to constantly make PyG even better. diff --git a/pytorch_geometric-2.3.1/benchmark/citation/arma.py b/pytorch_geometric-2.3.1/benchmark/citation/arma.py deleted file mode 100644 index 923cc0c..0000000 --- a/pytorch_geometric-2.3.1/benchmark/citation/arma.py +++ /dev/null @@ -1,62 +0,0 @@ -import argparse - -import torch -import torch.nn.functional as F -from citation import get_planetoid_dataset, random_planetoid_splits, run - -from torch_geometric.nn import ARMAConv -from torch_geometric.profile import rename_profile_file - -parser = argparse.ArgumentParser() -parser.add_argument('--dataset', type=str, required=True) -parser.add_argument('--random_splits', action='store_true') -parser.add_argument('--runs', type=int, default=100) -parser.add_argument('--epochs', type=int, default=1000) -parser.add_argument('--lr', type=float, default=0.01) -parser.add_argument('--weight_decay', type=float, default=0.0005) -parser.add_argument('--early_stopping', type=int, default=100) -parser.add_argument('--hidden', type=int, default=16) -parser.add_argument('--dropout', type=float, default=0.5) -parser.add_argument('--no_normalize_features', action='store_true') -parser.add_argument('--num_stacks', type=int, default=1) -parser.add_argument('--num_layers', type=int, default=1) -parser.add_argument('--shared_weights', action='store_true') -parser.add_argument('--skip_dropout', type=float, default=0.75) -parser.add_argument('--inference', action='store_true') -parser.add_argument('--profile', action='store_true') -parser.add_argument('--bf16', action='store_true') -args = parser.parse_args() - - -class Net(torch.nn.Module): - def __init__(self, dataset): - super().__init__() - self.conv1 = ARMAConv(dataset.num_features, args.hidden, - args.num_stacks, args.num_layers, - args.shared_weights, dropout=args.skip_dropout) - self.conv2 = ARMAConv(args.hidden, dataset.num_classes, - args.num_stacks, args.num_layers, - args.shared_weights, dropout=args.skip_dropout) - - def reset_parameters(self): - self.conv1.reset_parameters() - self.conv2.reset_parameters() - - def forward(self, data): - x, edge_index = data.x, data.edge_index - x = F.relu(self.conv1(x, edge_index)) - x = F.dropout(x, 
p=args.dropout, training=self.training) - x = self.conv2(x, edge_index) - return F.log_softmax(x, dim=1) - - -dataset = get_planetoid_dataset(args.dataset, not args.no_normalize_features) -permute_masks = random_planetoid_splits if args.random_splits else None -run(dataset, Net(dataset), args.runs, args.epochs, args.lr, args.weight_decay, - args.early_stopping, args.inference, args.profile, args.bf16, - permute_masks) - -if args.profile: - rename_profile_file('citation', ARMAConv.__name__, args.dataset, - str(args.random_splits), - 'inference' if args.inference else 'train') diff --git a/pytorch_geometric-2.3.1/benchmark/citation/gat.py b/pytorch_geometric-2.3.1/benchmark/citation/gat.py deleted file mode 100644 index 9462083..0000000 --- a/pytorch_geometric-2.3.1/benchmark/citation/gat.py +++ /dev/null @@ -1,60 +0,0 @@ -import argparse - -import torch -import torch.nn.functional as F -from citation import get_planetoid_dataset, random_planetoid_splits, run - -from torch_geometric.nn import GATConv -from torch_geometric.profile import rename_profile_file - -parser = argparse.ArgumentParser() -parser.add_argument('--dataset', type=str, required=True) -parser.add_argument('--random_splits', action='store_true') -parser.add_argument('--runs', type=int, default=100) -parser.add_argument('--epochs', type=int, default=1000) -parser.add_argument('--lr', type=float, default=0.005) -parser.add_argument('--weight_decay', type=float, default=0.0005) -parser.add_argument('--early_stopping', type=int, default=100) -parser.add_argument('--hidden', type=int, default=8) -parser.add_argument('--dropout', type=float, default=0.6) -parser.add_argument('--no_normalize_features', action='store_true') -parser.add_argument('--heads', type=int, default=8) -parser.add_argument('--output_heads', type=int, default=1) -parser.add_argument('--inference', action='store_true') -parser.add_argument('--profile', action='store_true') -parser.add_argument('--bf16', action='store_true') -args = parser.parse_args() - - -class Net(torch.nn.Module): - def __init__(self, dataset): - super().__init__() - self.conv1 = GATConv(dataset.num_features, args.hidden, - heads=args.heads, dropout=args.dropout) - self.conv2 = GATConv(args.hidden * args.heads, dataset.num_classes, - heads=args.output_heads, concat=False, - dropout=args.dropout) - - def reset_parameters(self): - self.conv1.reset_parameters() - self.conv2.reset_parameters() - - def forward(self, data): - x, edge_index = data.x, data.edge_index - x = F.dropout(x, p=args.dropout, training=self.training) - x = F.elu(self.conv1(x, edge_index)) - x = F.dropout(x, p=args.dropout, training=self.training) - x = self.conv2(x, edge_index) - return F.log_softmax(x, dim=1) - - -dataset = get_planetoid_dataset(args.dataset, not args.no_normalize_features) -permute_masks = random_planetoid_splits if args.random_splits else None -run(dataset, Net(dataset), args.runs, args.epochs, args.lr, args.weight_decay, - args.early_stopping, args.inference, args.profile, args.bf16, - permute_masks) - -if args.profile: - rename_profile_file('citation', GATConv.__name__, args.dataset, - str(args.random_splits), - 'inference' if args.inference else 'train') diff --git a/pytorch_geometric-2.3.1/benchmark/citation/gcn.py b/pytorch_geometric-2.3.1/benchmark/citation/gcn.py deleted file mode 100644 index d91a0be..0000000 --- a/pytorch_geometric-2.3.1/benchmark/citation/gcn.py +++ /dev/null @@ -1,54 +0,0 @@ -import argparse - -import torch -import torch.nn.functional as F -from citation import 
get_planetoid_dataset, random_planetoid_splits, run - -from torch_geometric.nn import GCNConv -from torch_geometric.profile import rename_profile_file - -parser = argparse.ArgumentParser() -parser.add_argument('--dataset', type=str, required=True) -parser.add_argument('--random_splits', action='store_true') -parser.add_argument('--runs', type=int, default=100) -parser.add_argument('--epochs', type=int, default=200) -parser.add_argument('--lr', type=float, default=0.01) -parser.add_argument('--weight_decay', type=float, default=0.0005) -parser.add_argument('--early_stopping', type=int, default=10) -parser.add_argument('--hidden', type=int, default=16) -parser.add_argument('--dropout', type=float, default=0.5) -parser.add_argument('--no_normalize_features', action='store_true') -parser.add_argument('--inference', action='store_true') -parser.add_argument('--profile', action='store_true') -parser.add_argument('--bf16', action='store_true') -args = parser.parse_args() - - -class Net(torch.nn.Module): - def __init__(self, dataset): - super().__init__() - self.conv1 = GCNConv(dataset.num_features, args.hidden) - self.conv2 = GCNConv(args.hidden, dataset.num_classes) - - def reset_parameters(self): - self.conv1.reset_parameters() - self.conv2.reset_parameters() - - def forward(self, data): - x, edge_index = data.x, data.edge_index - x = F.relu(self.conv1(x, edge_index)) - x = F.dropout(x, p=args.dropout, training=self.training) - x = self.conv2(x, edge_index) - return F.log_softmax(x, dim=1) - - -dataset = get_planetoid_dataset(args.dataset, not args.no_normalize_features) -permute_masks = random_planetoid_splits if args.random_splits else None -run(dataset, Net(dataset), args.runs, args.epochs, args.lr, args.weight_decay, - args.early_stopping, args.inference, args.profile, args.bf16, - permute_masks) - -if args.profile: - rename_profile_file('citation', GCNConv.__name__, args.dataset, - str(args.random_splits), - 'inference' if args.inference else 'train') diff --git a/pytorch_geometric-2.3.1/benchmark/citation/sgc.py b/pytorch_geometric-2.3.1/benchmark/citation/sgc.py deleted file mode 100644 index 1363323..0000000 --- a/pytorch_geometric-2.3.1/benchmark/citation/sgc.py +++ /dev/null @@ -1,50 +0,0 @@ -import argparse - -import torch -import torch.nn.functional as F -from citation import get_planetoid_dataset, random_planetoid_splits, run - -from torch_geometric.nn import SGConv -from torch_geometric.profile import rename_profile_file - -parser = argparse.ArgumentParser() -parser.add_argument('--dataset', type=str, required=True) -parser.add_argument('--random_splits', action='store_true') -parser.add_argument('--runs', type=int, default=100) -parser.add_argument('--epochs', type=int, default=200) -parser.add_argument('--lr', type=float, default=0.1) -parser.add_argument('--weight_decay', type=float, default=0.0005) -parser.add_argument('--early_stopping', type=int, default=10) -parser.add_argument('--no_normalize_features', action='store_true') -parser.add_argument('--K', type=int, default=2) -parser.add_argument('--inference', action='store_true') -parser.add_argument('--profile', action='store_true') -parser.add_argument('--bf16', action='store_true') -args = parser.parse_args() - - -class Net(torch.nn.Module): - def __init__(self, dataset): - super().__init__() - self.conv1 = SGConv(dataset.num_features, dataset.num_classes, - K=args.K, cached=True) - - def reset_parameters(self): - self.conv1.reset_parameters() - - def forward(self, data): - x, edge_index = data.x, data.edge_index - x = 
self.conv1(x, edge_index) - return F.log_softmax(x, dim=1) - - -dataset = get_planetoid_dataset(args.dataset, not args.no_normalize_features) -permute_masks = random_planetoid_splits if args.random_splits else None -run(dataset, Net(dataset), args.runs, args.epochs, args.lr, args.weight_decay, - args.early_stopping, args.inference, args.profile, args.bf16, - permute_masks) - -if args.profile: - rename_profile_file('citation', SGConv.__name__, args.dataset, - str(args.random_splits), - 'inference' if args.inference else 'train') diff --git a/pytorch_geometric-2.3.1/benchmark/citation/train_eval.py b/pytorch_geometric-2.3.1/benchmark/citation/train_eval.py deleted file mode 100644 index 5427053..0000000 --- a/pytorch_geometric-2.3.1/benchmark/citation/train_eval.py +++ /dev/null @@ -1,171 +0,0 @@ -import time - -import torch -import torch.nn.functional as F -from torch import tensor -from torch.optim import Adam - -from torch_geometric.profile import timeit, torch_profile -from torch_geometric.utils import index_to_mask - -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - - -def random_planetoid_splits(data, num_classes): - # Set new random planetoid splits: - # * 20 * num_classes labels for training - # * 500 labels for validation - # * 1000 labels for testing - - indices = [] - for i in range(num_classes): - index = (data.y == i).nonzero().view(-1) - index = index[torch.randperm(index.size(0))] - indices.append(index) - - train_index = torch.cat([i[:20] for i in indices], dim=0) - - rest_index = torch.cat([i[20:] for i in indices], dim=0) - rest_index = rest_index[torch.randperm(rest_index.size(0))] - - data.train_mask = index_to_mask(train_index, size=data.num_nodes) - data.val_mask = index_to_mask(rest_index[:500], size=data.num_nodes) - data.test_mask = index_to_mask(rest_index[500:1500], size=data.num_nodes) - - return data - - -def run_train(dataset, model, runs, epochs, lr, weight_decay, early_stopping, - profiling, permute_masks=None, logger=None): - val_losses, accs, durations = [], [], [] - for run in range(runs): - data = dataset[0] - if permute_masks is not None: - data = permute_masks(data, dataset.num_classes) - data = data.to(device) - - model.to(device).reset_parameters() - optimizer = Adam(model.parameters(), lr=lr, weight_decay=weight_decay) - - if torch.cuda.is_available(): - torch.cuda.synchronize() - - t_start = time.perf_counter() - - best_val_loss = float('inf') - test_acc = 0 - val_loss_history = [] - - for epoch in range(1, epochs + 1): - if run == runs - 1 and epoch == epochs: - with timeit(): - train(model, optimizer, data) - else: - train(model, optimizer, data) - eval_info = evaluate(model, data) - eval_info['epoch'] = epoch - - if logger is not None: - logger(eval_info) - - if eval_info['val_loss'] < best_val_loss: - best_val_loss = eval_info['val_loss'] - test_acc = eval_info['test_acc'] - - val_loss_history.append(eval_info['val_loss']) - if early_stopping > 0 and epoch > epochs // 2: - tmp = tensor(val_loss_history[-(early_stopping + 1):-1]) - if eval_info['val_loss'] > tmp.mean().item(): - break - - if torch.cuda.is_available(): - torch.cuda.synchronize() - - t_end = time.perf_counter() - - val_losses.append(best_val_loss) - accs.append(test_acc) - durations.append(t_end - t_start) - loss, acc, duration = tensor(val_losses), tensor(accs), tensor(durations) - - print(f'Val Loss: {float(loss.mean()):.4f}, ' - f'Test Accuracy: {float(acc.mean()):.3f} ± {float(acc.std()):.3f}, ' - f'Duration: {float(duration.mean()):.3f}s') - - if 
profiling: - with torch_profile(): - train(model, optimizer, data) - - -@torch.no_grad() -def run_inference(dataset, model, epochs, profiling, bf16, permute_masks=None, - logger=None): - data = dataset[0] - if permute_masks is not None: - data = permute_masks(data, dataset.num_classes) - data = data.to(device) - - model.to(device).reset_parameters() - - if torch.cuda.is_available(): - amp = torch.cuda.amp.autocast(enabled=False) - else: - amp = torch.cpu.amp.autocast(enabled=bf16) - if bf16: - data.x = data.x.to(torch.bfloat16) - - with amp: - for epoch in range(1, epochs + 1): - if epoch == epochs: - with timeit(): - inference(model, data) - else: - inference(model, data) - - if profiling: - with torch_profile(): - inference(model, data) - - -def run(dataset, model, runs, epochs, lr, weight_decay, early_stopping, - inference, profiling, bf16, permute_masks=None, logger=None): - if not inference: - run_train(dataset, model, runs, epochs, lr, weight_decay, - early_stopping, profiling, permute_masks, logger) - else: - run_inference(dataset, model, epochs, profiling, bf16, permute_masks, - logger) - - -def train(model, optimizer, data): - model.train() - optimizer.zero_grad() - out = model(data) - loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask]) - loss.backward() - optimizer.step() - - -@torch.no_grad() -def evaluate(model, data): - model.eval() - - out = model(data) - - outs = {} - for key in ['train', 'val', 'test']: - mask = data[f'{key}_mask'] - loss = float(F.nll_loss(out[mask], data.y[mask])) - pred = out[mask].argmax(1) - acc = pred.eq(data.y[mask]).sum().item() / mask.sum().item() - - outs[f'{key}_loss'] = loss - outs[f'{key}_acc'] = acc - - return outs - - -@torch.no_grad() -def inference(model, data): - model.eval() - model(data) diff --git a/pytorch_geometric-2.3.1/benchmark/inference/inference_benchmark.py b/pytorch_geometric-2.3.1/benchmark/inference/inference_benchmark.py deleted file mode 100644 index be72ed0..0000000 --- a/pytorch_geometric-2.3.1/benchmark/inference/inference_benchmark.py +++ /dev/null @@ -1,283 +0,0 @@ -import argparse -from collections import defaultdict -from contextlib import nullcontext - -import torch - -from benchmark.utils import ( - emit_itt, - get_dataset_with_transformation, - get_model, - get_split_masks, - save_benchmark_data, - write_to_csv, -) -from torch_geometric.loader import NeighborLoader -from torch_geometric.nn import PNAConv -from torch_geometric.profile import rename_profile_file, timeit, torch_profile - -supported_sets = { - 'ogbn-mag': ['rgat', 'rgcn'], - 'ogbn-products': ['edge_cnn', 'gat', 'gcn', 'pna', 'sage'], - 'Reddit': ['edge_cnn', 'gat', 'gcn', 'pna', 'sage'], -} - - -@torch.no_grad() -def full_batch_inference(model, data): - model.eval() - if hasattr(data, 'adj_t'): - edge_index = data.adj_t - else: - edge_index = data.edge_index - return model(data.x, edge_index) - - -def test(y, loader): - y_hat = y.argmax(dim=-1) - y = loader.data.y.to(y_hat.device) - mask = loader.data.test_mask - return int((y_hat[mask] == y[mask]).sum()) / int(mask.sum()) - - -def run(args: argparse.ArgumentParser): - csv_data = defaultdict(list) - - # cuda device is not suitable for full batch mode - device = torch.device( - 'cuda' if not args.full_batch and torch.cuda.is_available() else 'cpu') - - print('BENCHMARK STARTS') - for dataset_name in args.datasets: - assert dataset_name in supported_sets.keys( - ), f"Dataset {dataset_name} isn't supported." 
- print(f'Dataset: {dataset_name}') - load_time = timeit() if args.measure_load_time else nullcontext() - with load_time: - result = get_dataset_with_transformation(dataset_name, args.root, - args.use_sparse_tensor, - args.bf16) - dataset, num_classes, transformation = result - data = dataset.to(device) - hetero = True if dataset_name == 'ogbn-mag' else False - mask = ('paper', None) if dataset_name == 'ogbn-mag' else None - _, _, test_mask = get_split_masks(data, dataset_name) - degree = None - - if args.num_layers != [1] and not hetero and args.num_steps != -1: - raise ValueError("Layer-wise inference requires `steps=-1`") - - if torch.cuda.is_available(): - amp = torch.cuda.amp.autocast(enabled=False) - else: - amp = torch.cpu.amp.autocast(enabled=args.bf16) - - inputs_channels = data[ - 'paper'].num_features if dataset_name == 'ogbn-mag' \ - else dataset.num_features - - for model_name in args.models: - if model_name not in supported_sets[dataset_name]: - print(f'Configuration of {dataset_name} + {model_name} ' - f'not supported. Skipping.') - continue - with_loader = not args.full_batch or (model_name == 'pna' - and degree is None) - print(f'Evaluation bench for {model_name}:') - - for batch_size in args.eval_batch_sizes: - num_nodes = data[ - 'paper'].num_nodes if hetero else data.num_nodes - sampler = torch.utils.data.RandomSampler( - range(num_nodes), num_samples=args.num_steps * batch_size - ) if args.num_steps != -1 and with_loader else None - kwargs = { - 'batch_size': batch_size, - 'shuffle': False, - 'num_workers': args.num_workers, - } - if not hetero: - subgraph_loader = NeighborLoader( - data, - num_neighbors=[-1], # layer-wise inference - input_nodes=mask, - sampler=sampler, - filter_per_worker=args.filter_per_worker, - **kwargs, - ) if with_loader else None - if args.evaluate and not args.full_batch: - test_loader = NeighborLoader( - data, - num_neighbors=[-1], # layer-wise inference - input_nodes=test_mask, - sampler=None, - filter_per_worker=args.filter_per_worker, - **kwargs, - ) - - for layers in args.num_layers: - num_neighbors = [args.hetero_num_neighbors] * layers - if hetero: - # batch-wise inference - subgraph_loader = NeighborLoader( - data, - num_neighbors=num_neighbors, - input_nodes=mask, - sampler=sampler, - filter_per_worker=args.filter_per_worker, - **kwargs, - ) if with_loader else None - if args.evaluate and not args.full_batch: - test_loader = NeighborLoader( - data, - num_neighbors=num_neighbors, - input_nodes=test_mask, - sampler=None, - filter_per_worker=args.filter_per_worker, - **kwargs, - ) - - for hidden_channels in args.num_hidden_channels: - print('----------------------------------------------') - print(f'Batch size={batch_size}, ' - f'Layers amount={layers}, ' - f'Num_neighbors={num_neighbors}, ' - f'Hidden features size={hidden_channels}, ' - f'Sparse tensor={args.use_sparse_tensor}') - params = { - 'inputs_channels': inputs_channels, - 'hidden_channels': hidden_channels, - 'output_channels': num_classes, - 'num_heads': args.num_heads, - 'num_layers': layers, - } - - if model_name == 'pna': - if degree is None: - degree = PNAConv.get_degree_histogram( - subgraph_loader) - print(f'Calculated degree for {dataset_name}.') - params['degree'] = degree - - model = get_model( - model_name, params, - metadata=data.metadata() if hetero else None) - model = model.to(device) - # TODO: Migrate to ModelHubMixin. 
- if args.ckpt_path: - state_dict = torch.load(args.ckpt_path) - model.load_state_dict(state_dict) - model.eval() - - # Define context manager parameters: - if args.cpu_affinity and with_loader: - cpu_affinity = subgraph_loader.enable_cpu_affinity( - args.loader_cores) - else: - cpu_affinity = nullcontext() - profile = torch_profile( - ) if args.profile else nullcontext() - itt = emit_itt( - ) if args.vtune_profile else nullcontext() - - if args.full_batch and args.use_sparse_tensor: - data = transformation(data) - - with cpu_affinity, amp, timeit() as time: - for _ in range(args.warmup): - if args.full_batch: - full_batch_inference(model, data) - else: - model.inference(subgraph_loader, device, - progress_bar=True) - if args.warmup > 0: - time.reset() - with itt, profile: - if args.full_batch: - y = full_batch_inference(model, data) - if args.evaluate: - mask = data.test_mask - pred = y[mask].argmax(1) - test_acc = pred.eq(data.y[mask]).sum( - ).item() / mask.sum().item() - print(f'Full Batch Test Accuracy: \ - {test_acc:.4f}') - else: - y = model.inference( - subgraph_loader, - device, - progress_bar=True, - ) - if args.evaluate: - test_acc = model.test( - y, - test_loader, - device, - progress_bar=True, - ) - print(f'Mini Batch Test Accuracy: \ - {test_acc:.4f}') - - if args.profile: - rename_profile_file(model_name, dataset_name, - str(batch_size), str(layers), - str(hidden_channels), - str(num_neighbors)) - total_time = time.duration - if args.num_steps != -1: - total_num_samples = args.num_steps * batch_size - else: - total_num_samples = num_nodes - throughput = total_num_samples / total_time - latency = total_time / total_num_samples * 1000 - print(f'Throughput: {throughput:.3f} samples/s') - print(f'Latency: {latency:.3f} ms') - - save_benchmark_data(csv_data, batch_size, layers, - num_neighbors, hidden_channels, - total_time, model_name, - dataset_name, - args.use_sparse_tensor) - if args.write_csv: - write_to_csv(csv_data) - - -if __name__ == '__main__': - argparser = argparse.ArgumentParser('GNN inference benchmark') - add = argparser.add_argument - - add('--datasets', nargs='+', - default=['ogbn-mag', 'ogbn-products', 'Reddit'], type=str) - add('--use-sparse-tensor', action='store_true', - help='use torch_sparse.SparseTensor as graph storage format') - add('--models', nargs='+', - default=['edge_cnn', 'gat', 'gcn', 'pna', 'rgat', 'rgcn'], type=str) - add('--root', default='../../data', type=str, - help='relative path to look for the datasets') - add('--eval-batch-sizes', nargs='+', default=[512, 1024, 2048, 4096, 8192], - type=int) - add('--num-layers', nargs='+', default=[2, 3], type=int) - add('--num-hidden-channels', nargs='+', default=[64, 128, 256], type=int) - add('--num-heads', default=2, type=int, - help='number of hidden attention heads, applies only for gat and rgat') - add('--hetero-num-neighbors', default=10, type=int, - help='number of neighbors to sample per layer for hetero workloads') - add('--num-workers', default=0, type=int) - add('--num-steps', default=-1, type=int, - help='number of steps, -1 means iterating through all the data') - add('--warmup', default=1, type=int) - add('--profile', action='store_true') - add('--vtune-profile', action='store_true') - add('--bf16', action='store_true') - add('--cpu-affinity', action='store_true', - help='Use DataLoader affinitization.') - add('--loader-cores', nargs='+', default=[], type=int, - help="List of CPU core IDs to use for DataLoader workers") - add('--filter-per-worker', action='store_true', - help='Enable
filter-per-worker feature of the dataloader.') - add('--measure-load-time', action='store_true') - add('--full-batch', action='store_true', help='Use full batch mode') - add('--evaluate', action='store_true') - add('--ckpt_path', type=str, help='Checkpoint path for loading a model') - add('--write-csv', action='store_true', help='Write benchmark data to csv') - run(argparser.parse_args()) diff --git a/pytorch_geometric-2.3.1/benchmark/kernel/gin.py b/pytorch_geometric-2.3.1/benchmark/kernel/gin.py deleted file mode 100644 index 0fea309..0000000 --- a/pytorch_geometric-2.3.1/benchmark/kernel/gin.py +++ /dev/null @@ -1,210 +0,0 @@ -import torch -import torch.nn.functional as F -from torch.nn import BatchNorm1d as BN -from torch.nn import Linear, ReLU, Sequential - -from torch_geometric.nn import GINConv, JumpingKnowledge, global_mean_pool - - -class GIN0(torch.nn.Module): - def __init__(self, dataset, num_layers, hidden): - super().__init__() - self.conv1 = GINConv( - Sequential( - Linear(dataset.num_features, hidden), - ReLU(), - Linear(hidden, hidden), - ReLU(), - BN(hidden), - ), train_eps=False) - self.convs = torch.nn.ModuleList() - for i in range(num_layers - 1): - self.convs.append( - GINConv( - Sequential( - Linear(hidden, hidden), - ReLU(), - Linear(hidden, hidden), - ReLU(), - BN(hidden), - ), train_eps=False)) - self.lin1 = Linear(hidden, hidden) - self.lin2 = Linear(hidden, dataset.num_classes) - - def reset_parameters(self): - self.conv1.reset_parameters() - for conv in self.convs: - conv.reset_parameters() - self.lin1.reset_parameters() - self.lin2.reset_parameters() - - def forward(self, data): - x, edge_index, batch = data.x, data.edge_index, data.batch - x = self.conv1(x, edge_index) - for conv in self.convs: - x = conv(x, edge_index) - x = global_mean_pool(x, batch) - x = F.relu(self.lin1(x)) - x = F.dropout(x, p=0.5, training=self.training) - x = self.lin2(x) - return F.log_softmax(x, dim=-1) - - def __repr__(self): - return self.__class__.__name__ - - -class GIN0WithJK(torch.nn.Module): - def __init__(self, dataset, num_layers, hidden, mode='cat'): - super().__init__() - self.conv1 = GINConv( - Sequential( - Linear(dataset.num_features, hidden), - ReLU(), - Linear(hidden, hidden), - ReLU(), - BN(hidden), - ), train_eps=False) - self.convs = torch.nn.ModuleList() - for i in range(num_layers - 1): - self.convs.append( - GINConv( - Sequential( - Linear(hidden, hidden), - ReLU(), - Linear(hidden, hidden), - ReLU(), - BN(hidden), - ), train_eps=False)) - self.jump = JumpingKnowledge(mode) - if mode == 'cat': - self.lin1 = Linear(num_layers * hidden, hidden) - else: - self.lin1 = Linear(hidden, hidden) - self.lin2 = Linear(hidden, dataset.num_classes) - - def reset_parameters(self): - self.conv1.reset_parameters() - for conv in self.convs: - conv.reset_parameters() - self.jump.reset_parameters() - self.lin1.reset_parameters() - self.lin2.reset_parameters() - - def forward(self, data): - x, edge_index, batch = data.x, data.edge_index, data.batch - x = self.conv1(x, edge_index) - xs = [x] - for conv in self.convs: - x = conv(x, edge_index) - xs += [x] - x = self.jump(xs) - x = global_mean_pool(x, batch) - x = F.relu(self.lin1(x)) - x = F.dropout(x, p=0.5, training=self.training) - x = self.lin2(x) - return F.log_softmax(x, dim=-1) - - def __repr__(self): - return self.__class__.__name__ - - -class GIN(torch.nn.Module): - def __init__(self, dataset, num_layers, hidden): - super().__init__() - self.conv1 = GINConv( - Sequential( - Linear(dataset.num_features, hidden), - ReLU(), - 
Linear(hidden, hidden), - ReLU(), - BN(hidden), - ), train_eps=True) - self.convs = torch.nn.ModuleList() - for i in range(num_layers - 1): - self.convs.append( - GINConv( - Sequential( - Linear(hidden, hidden), - ReLU(), - Linear(hidden, hidden), - ReLU(), - BN(hidden), - ), train_eps=True)) - self.lin1 = Linear(hidden, hidden) - self.lin2 = Linear(hidden, dataset.num_classes) - - def reset_parameters(self): - self.conv1.reset_parameters() - for conv in self.convs: - conv.reset_parameters() - self.lin1.reset_parameters() - self.lin2.reset_parameters() - - def forward(self, data): - x, edge_index, batch = data.x, data.edge_index, data.batch - x = self.conv1(x, edge_index) - for conv in self.convs: - x = conv(x, edge_index) - x = global_mean_pool(x, batch) - x = F.relu(self.lin1(x)) - x = F.dropout(x, p=0.5, training=self.training) - x = self.lin2(x) - return F.log_softmax(x, dim=-1) - - def __repr__(self): - return self.__class__.__name__ - - -class GINWithJK(torch.nn.Module): - def __init__(self, dataset, num_layers, hidden, mode='cat'): - super().__init__() - self.conv1 = GINConv( - Sequential( - Linear(dataset.num_features, hidden), - ReLU(), - Linear(hidden, hidden), - ReLU(), - BN(hidden), - ), train_eps=True) - self.convs = torch.nn.ModuleList() - for i in range(num_layers - 1): - self.convs.append( - GINConv( - Sequential( - Linear(hidden, hidden), - ReLU(), - Linear(hidden, hidden), - ReLU(), - BN(hidden), - ), train_eps=True)) - self.jump = JumpingKnowledge(mode) - if mode == 'cat': - self.lin1 = Linear(num_layers * hidden, hidden) - else: - self.lin1 = Linear(hidden, hidden) - self.lin2 = Linear(hidden, dataset.num_classes) - - def reset_parameters(self): - self.conv1.reset_parameters() - for conv in self.convs: - conv.reset_parameters() - self.jump.reset_parameters() - self.lin1.reset_parameters() - self.lin2.reset_parameters() - - def forward(self, data): - x, edge_index, batch = data.x, data.edge_index, data.batch - x = self.conv1(x, edge_index) - xs = [x] - for conv in self.convs: - x = conv(x, edge_index) - xs += [x] - x = self.jump(xs) - x = global_mean_pool(x, batch) - x = F.relu(self.lin1(x)) - x = F.dropout(x, p=0.5, training=self.training) - x = self.lin2(x) - return F.log_softmax(x, dim=-1) - - def __repr__(self): - return self.__class__.__name__ diff --git a/pytorch_geometric-2.3.1/benchmark/kernel/train_eval.py b/pytorch_geometric-2.3.1/benchmark/kernel/train_eval.py deleted file mode 100644 index 6ec1195..0000000 --- a/pytorch_geometric-2.3.1/benchmark/kernel/train_eval.py +++ /dev/null @@ -1,155 +0,0 @@ -import time - -import torch -import torch.nn.functional as F -from sklearn.model_selection import StratifiedKFold -from torch import tensor -from torch.optim import Adam - -from torch_geometric.loader import DataLoader -from torch_geometric.loader import DenseDataLoader as DenseLoader - -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - - -def cross_validation_with_val_set(dataset, model, folds, epochs, batch_size, - lr, lr_decay_factor, lr_decay_step_size, - weight_decay, logger=None): - - val_losses, accs, durations = [], [], [] - for fold, (train_idx, test_idx, - val_idx) in enumerate(zip(*k_fold(dataset, folds))): - - train_dataset = dataset[train_idx] - test_dataset = dataset[test_idx] - val_dataset = dataset[val_idx] - - if 'adj' in train_dataset[0]: - train_loader = DenseLoader(train_dataset, batch_size, shuffle=True) - val_loader = DenseLoader(val_dataset, batch_size, shuffle=False) - test_loader = DenseLoader(test_dataset, batch_size, 
shuffle=False) - else: - train_loader = DataLoader(train_dataset, batch_size, shuffle=True) - val_loader = DataLoader(val_dataset, batch_size, shuffle=False) - test_loader = DataLoader(test_dataset, batch_size, shuffle=False) - - model.to(device).reset_parameters() - optimizer = Adam(model.parameters(), lr=lr, weight_decay=weight_decay) - - if torch.cuda.is_available(): - torch.cuda.synchronize() - - t_start = time.perf_counter() - - for epoch in range(1, epochs + 1): - train_loss = train(model, optimizer, train_loader) - val_losses.append(eval_loss(model, val_loader)) - accs.append(eval_acc(model, test_loader)) - eval_info = { - 'fold': fold, - 'epoch': epoch, - 'train_loss': train_loss, - 'val_loss': val_losses[-1], - 'test_acc': accs[-1], - } - - if logger is not None: - logger(eval_info) - - if epoch % lr_decay_step_size == 0: - for param_group in optimizer.param_groups: - param_group['lr'] = lr_decay_factor * param_group['lr'] - - if torch.cuda.is_available(): - torch.cuda.synchronize() - - t_end = time.perf_counter() - durations.append(t_end - t_start) - - loss, acc, duration = tensor(val_losses), tensor(accs), tensor(durations) - loss, acc = loss.view(folds, epochs), acc.view(folds, epochs) - loss, argmin = loss.min(dim=1) - acc = acc[torch.arange(folds, dtype=torch.long), argmin] - - loss_mean = loss.mean().item() - acc_mean = acc.mean().item() - acc_std = acc.std().item() - duration_mean = duration.mean().item() - print(f'Val Loss: {loss_mean:.4f}, Test Accuracy: {acc_mean:.3f} ' - f'± {acc_std:.3f}, Duration: {duration_mean:.3f}') - - return loss_mean, acc_mean, acc_std - - -def k_fold(dataset, folds): - skf = StratifiedKFold(folds, shuffle=True, random_state=12345) - - test_indices, train_indices = [], [] - for _, idx in skf.split(torch.zeros(len(dataset)), dataset.data.y): - test_indices.append(torch.from_numpy(idx).to(torch.long)) - - val_indices = [test_indices[i - 1] for i in range(folds)] - - for i in range(folds): - train_mask = torch.ones(len(dataset), dtype=torch.bool) - train_mask[test_indices[i]] = 0 - train_mask[val_indices[i]] = 0 - train_indices.append(train_mask.nonzero(as_tuple=False).view(-1)) - - return train_indices, test_indices, val_indices - - -def num_graphs(data): - if hasattr(data, 'num_graphs'): - return data.num_graphs - else: - return data.x.size(0) - - -def train(model, optimizer, loader): - model.train() - - total_loss = 0 - for data in loader: - optimizer.zero_grad() - data = data.to(device) - out = model(data) - loss = F.nll_loss(out, data.y.view(-1)) - loss.backward() - total_loss += loss.item() * num_graphs(data) - optimizer.step() - return total_loss / len(loader.dataset) - - -def eval_acc(model, loader): - model.eval() - - correct = 0 - for data in loader: - data = data.to(device) - with torch.no_grad(): - pred = model(data).max(1)[1] - correct += pred.eq(data.y.view(-1)).sum().item() - return correct / len(loader.dataset) - - -def eval_loss(model, loader): - model.eval() - - loss = 0 - for data in loader: - data = data.to(device) - with torch.no_grad(): - out = model(data) - loss += F.nll_loss(out, data.y.view(-1), reduction='sum').item() - return loss / len(loader.dataset) - - -@torch.no_grad() -def inference_run(model, loader, bf16): - model.eval() - for data in loader: - data = data.to(device) - if bf16: - data.x = data.x.to(torch.bfloat16) - model(data) diff --git a/pytorch_geometric-2.3.1/benchmark/loader/neighbor_loader.py b/pytorch_geometric-2.3.1/benchmark/loader/neighbor_loader.py deleted file mode 100644 index e5c906c..0000000 --- 
a/pytorch_geometric-2.3.1/benchmark/loader/neighbor_loader.py +++ /dev/null @@ -1,132 +0,0 @@ -import argparse -import ast -import os.path as osp -from contextlib import nullcontext -from timeit import default_timer - -import tqdm -from ogb.nodeproppred import PygNodePropPredDataset - -import torch_geometric.transforms as T -from torch_geometric.datasets import OGB_MAG -from torch_geometric.loader import NeighborLoader -from torch_geometric.profile import torch_profile - - -def run(args: argparse.ArgumentParser): - for dataset_name in args.datasets: - print(f"Dataset: {dataset_name}") - root = osp.join(args.root, dataset_name) - transform = T.ToSparseTensor( - remove_edge_index=False) if args.use_sparse_tensor else None - if dataset_name == 'mag': - transform = (T.ToUndirected(merge=True) if transform is None else - T.Compose([T.ToUndirected(merge=True), transform])) - dataset = OGB_MAG(root=root, transform=transform) - train_idx = ('paper', dataset[0]['paper'].train_mask) - eval_idx = ('paper', None) - neighbor_sizes = (args.hetero_neighbor_sizes - if args.hetero_neighbor_sizes else None) - else: - dataset = PygNodePropPredDataset(f'ogbn-{dataset_name}', root) - split_idx = dataset.get_idx_split() - train_idx = split_idx['train'] - eval_idx = None - neighbor_sizes = (args.homo_neighbor_sizes - if args.homo_neighbor_sizes else None) - - data = dataset[0].to(args.device) - average_times = [] - profile = torch_profile() if args.profile else nullcontext() - # run dataloader iteration - if neighbor_sizes is not None: - for num_neighbors in neighbor_sizes: - print(f'Training sampling with {num_neighbors} neighbors') - for batch_size in args.batch_sizes: - train_loader = NeighborLoader( - data, num_neighbors=num_neighbors, - input_nodes=train_idx, batch_size=batch_size, - shuffle=True, num_workers=args.num_workers, - filter_per_worker=args.filter_per_worker) - cpu_affinity = train_loader.enable_cpu_affinity( - args.loader_cores - ) if args.cpu_affinity else nullcontext() - runtimes = [] - num_iterations = 0 - with profile, cpu_affinity: - for runit in range(args.runs): - start = default_timer() - for batch in tqdm.tqdm(train_loader): - num_iterations += 1 - stop = default_timer() - runtimes.append(round(stop - start, 3)) - average_time = round(sum(runtimes) / args.runs, 3) - print(f'batch size={batch_size}, ' - f'iterations={num_iterations}, ' - f'runtimes={runtimes}, ' - f'average runtime={average_time}') - average_times.append(average_time) - eval_batch_sizes = (args.eval_batch_sizes - if args.eval_batch_sizes else None) - if eval_batch_sizes is not None: - print('Evaluation sampling with all neighbors') - for batch_size in eval_batch_sizes: - subgraph_loader = NeighborLoader( - data, - num_neighbors=[-1], - input_nodes=eval_idx, - batch_size=batch_size, - shuffle=False, - num_workers=args.num_workers, - filter_per_worker=args.filter_per_worker, - ) - cpu_affinity = subgraph_loader.enable_cpu_affinity( - args.loader_cores) if args.cpu_affinity else nullcontext() - runtimes = [] - num_iterations = 0 - with profile, cpu_affinity: - for runit in range(args.runs): - start = default_timer() - for batch in tqdm.tqdm(subgraph_loader): - num_iterations += 1 - stop = default_timer() - runtimes.append(round(stop - start, 3)) - average_time = round(sum(runtimes) / args.runs, 3) - print(f'batch size={batch_size}, ' - f'iterations={num_iterations}, ' - f'runtimes={runtimes}, ' - f'average runtime={average_time}') - average_times.append(average_time) - print(f"Total time averages: {average_times}") - - -if 
__name__ == '__main__': - parser = argparse.ArgumentParser('NeighborLoader Sampling Benchmarking') - add = parser.add_argument - - add('--device', default='cpu') - add('--datasets', nargs="+", default=['arxiv', 'products', 'mag']) - add('--root', default='../../data') - add('--batch-sizes', default=[8192, 4096, 2048, 1024, 512], - type=ast.literal_eval) - add('--eval-batch-sizes', default=[16384, 8192, 4096, 2048, 1024, 512], - type=ast.literal_eval) - add('--homo-neighbor_sizes', default=[[10, 5], [15, 10, 5], [20, 15, 10]], - type=ast.literal_eval) - add('--hetero-neighbor_sizes', default=[[5], [10], [10, 5]], - type=ast.literal_eval) - add('--use-sparse-tensor', action='store_true', - help='use torch_sparse.SparseTensor as graph storage format') - add('--num-workers', type=int, default=0, - help="Number of DataLoader workers to use.") - add('--runs', type=int, default=3, - help="Number of iterations for each test setting.") - add('--profile', default=False, action='store_true', - help="Run torch.profiler.") - add('--filter-per-worker', default=False, action='store_true', - help="Use filter per worker.") - add('--cpu-affinity', default=False, action='store_true', - help="Use DataLoader affinitization.") - add('--loader-cores', nargs='+', default=[], type=int, - help="List of CPU core IDs to use for DataLoader workers.") - run(parser.parse_args()) diff --git a/pytorch_geometric-2.3.1/benchmark/points/train_eval.py b/pytorch_geometric-2.3.1/benchmark/points/train_eval.py deleted file mode 100644 index 9640deb..0000000 --- a/pytorch_geometric-2.3.1/benchmark/points/train_eval.py +++ /dev/null @@ -1,111 +0,0 @@ -import time - -import torch -import torch.nn.functional as F -from torch.optim import Adam - -from torch_geometric.loader import DataLoader -from torch_geometric.profile import timeit, torch_profile - -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - - -def run_train(train_dataset, test_dataset, model, epochs, batch_size, lr, - lr_decay_factor, lr_decay_step_size, weight_decay): - model = model.to(device) - optimizer = Adam(model.parameters(), lr=lr, weight_decay=weight_decay) - - train_loader = DataLoader(train_dataset, batch_size, shuffle=True) - test_loader = DataLoader(test_dataset, batch_size, shuffle=False) - - for epoch in range(1, epochs + 1): - if torch.cuda.is_available(): - torch.cuda.synchronize() - - t_start = time.perf_counter() - - train(model, optimizer, train_loader, device) - test_acc = test(model, test_loader, device) - - if torch.cuda.is_available(): - torch.cuda.synchronize() - - t_end = time.perf_counter() - - print(f'Epoch: {epoch:03d}, Test: {test_acc:.4f}, ' - f'Duration: {t_end - t_start:.2f}') - - if epoch % lr_decay_step_size == 0: - for param_group in optimizer.param_groups: - param_group['lr'] = lr_decay_factor * param_group['lr'] - - -@torch.no_grad() -def run_inference(test_dataset, model, epochs, batch_size, profiling, bf16): - model = model.to(device) - test_loader = DataLoader(test_dataset, batch_size, shuffle=False) - - if torch.cuda.is_available(): - amp = torch.cuda.amp.autocast(enabled=False) - else: - amp = torch.cpu.amp.autocast(enabled=bf16) - - with amp: - for epoch in range(1, epochs + 1): - print("Epoch: ", epoch) - if epoch == epochs: - with timeit(): - inference(model, test_loader, device, bf16) - else: - inference(model, test_loader, device, bf16) - - if profiling: - with torch_profile(): - inference(model, test_loader, device, bf16) - - -def run(train_dataset, test_dataset, model, epochs, batch_size, lr, -
lr_decay_factor, lr_decay_step_size, weight_decay, inference, - profiling, bf16): - if not inference: - run_train(train_dataset, test_dataset, model, epochs, batch_size, lr, - lr_decay_factor, lr_decay_step_size, weight_decay) - else: - run_inference(test_dataset, model, epochs, batch_size, profiling, bf16) - - -def train(model, optimizer, train_loader, device): - model.train() - - for data in train_loader: - optimizer.zero_grad() - data = data.to(device) - out = model(data.pos, data.batch) - loss = F.nll_loss(out, data.y) - loss.backward() - optimizer.step() - - -@torch.no_grad() -def test(model, test_loader, device): - model.eval() - - correct = 0 - for data in test_loader: - data = data.to(device) - pred = model(data.pos, data.batch).max(1)[1] - correct += pred.eq(data.y).sum().item() - test_acc = correct / len(test_loader.dataset) - - return test_acc - - -@torch.no_grad() -def inference(model, test_loader, device, bf16): - model.eval() - for data in test_loader: - data = data.to(device) - if bf16: - data.pos = data.pos.to(torch.bfloat16) - model = model.to(torch.bfloat16) - model(data.pos, data.batch) diff --git a/pytorch_geometric-2.3.1/benchmark/runtime/dgl/gat.py b/pytorch_geometric-2.3.1/benchmark/runtime/dgl/gat.py deleted file mode 100644 index c06398c..0000000 --- a/pytorch_geometric-2.3.1/benchmark/runtime/dgl/gat.py +++ /dev/null @@ -1,130 +0,0 @@ -import dgl.function as fn -import torch -import torch.nn.functional as F -from dgl.nn.pytorch import EdgeSoftmax -from torch.nn import Parameter - -from torch_geometric.nn.inits import glorot, zeros - - -class GATConv(torch.nn.Module): - def __init__(self, g, in_channels, out_channels, heads=1, - negative_slope=0.2, dropout=0): - super().__init__() - - self.g = g - self.in_channels = in_channels - self.out_channels = out_channels - self.heads = heads - self.negative_slope = negative_slope - self.dropout = dropout - - self.weight = Parameter(torch.Tensor(in_channels, - heads * out_channels)) - self.att = Parameter(torch.Tensor(1, heads, 2 * out_channels)) - self.bias = Parameter(torch.Tensor(heads * out_channels)) - self.reset_parameters() - - def reset_parameters(self): - glorot(self.weight) - glorot(self.att) - zeros(self.bias) - - def gat_msg(self, edge): - alpha = torch.cat([edge.src['x'], edge.dst['x']], dim=-1) - alpha = (alpha * self.att).sum(dim=-1) - alpha = F.leaky_relu(alpha, self.negative_slope) - return {'m': edge.src['x'], 'a': alpha} - - def gat_reduce(self, node): - alpha = torch.softmax(node.mailbox['a'], dim=1) - alpha = F.dropout(alpha, p=self.dropout, training=self.training) - x = (node.mailbox['m'] * alpha.unsqueeze(-1)).sum(dim=1) - return {'x': x} - - def forward(self, x): - x = torch.mm(x, self.weight).view(-1, self.heads, self.out_channels) - self.g.ndata['x'] = x - self.g.update_all(self.gat_msg, self.gat_reduce) - x = self.g.ndata.pop('x') - x = x.view(-1, self.heads * self.out_channels) - x = x + self.bias - return x - - -class GAT(torch.nn.Module): - def __init__(self, g, in_channels, out_channels): - super().__init__() - self.g = g - self.conv1 = GATConv(g, in_channels, 8, 8, 0.6, 0.2) - self.conv2 = GATConv(g, 64, out_channels, 1, 0.6, 0.2) - - def forward(self, x): - x = F.dropout(x, p=0.6, training=self.training) - x = F.elu(self.conv1(x)) - x = F.dropout(x, p=0.6, training=self.training) - x = self.conv2(x) - return F.log_softmax(x, dim=1) - - -class GATSPMVConv(torch.nn.Module): - def __init__(self, g, in_channels, out_channels, heads=1, - negative_slope=0.2, dropout=0): - super().__init__() - 
self.g = g - self.out_channels = out_channels - self.heads = heads - self.negative_slope = negative_slope - self.dropout = dropout - self.weight = Parameter(torch.Tensor(in_channels, - heads * out_channels)) - self.att_l = Parameter(torch.Tensor(heads, out_channels, 1)) - self.att_r = Parameter(torch.Tensor(heads, out_channels, 1)) - self.bias = Parameter(torch.Tensor(heads * out_channels)) - self.softmax = EdgeSoftmax() - self.reset_parameters() - - def reset_parameters(self): - glorot(self.weight) - glorot(self.att_l) - glorot(self.att_r) - zeros(self.bias) - - def forward(self, x): - x = torch.matmul(x, self.weight) - x = x.reshape((x.size(0), self.heads, -1)) # NxHxD' - head_x = x.transpose(0, 1) # HxNxD' - a1 = torch.bmm(head_x, self.att_l).transpose(0, 1) # NxHx1 - a2 = torch.bmm(head_x, self.att_r).transpose(0, 1) # NxHx1 - self.g.ndata.update({'x': x, 'a1': a1, 'a2': a2}) - self.g.apply_edges(self.edge_attention) - self.edge_softmax() - self.g.update_all(fn.src_mul_edge('x', 'a', 'x'), fn.sum('x', 'x')) - x = self.g.ndata['x'] / self.g.ndata['z'] # NxHxD' - return x.view(-1, self.heads * self.out_channels) - - def edge_attention(self, edge): - a = F.leaky_relu(edge.src['a1'] + edge.dst['a2'], self.negative_slope) - return {'a': a} - - def edge_softmax(self): - alpha, normalizer = self.softmax(self.g.edata['a'], self.g) - self.g.ndata['z'] = normalizer - if self.training and self.dropout > 0: - alpha = F.dropout(alpha, p=self.dropout, training=True) - self.g.edata['a'] = alpha - - -class GATSPMV(torch.nn.Module): - def __init__(self, g, in_channels, out_channels): - super().__init__() - self.g = g - self.conv1 = GATSPMVConv(g, in_channels, 8, 8, 0.6, 0.2) - self.conv2 = GATSPMVConv(g, 64, out_channels, 1, 0.6, 0.2) - - def forward(self, x): - x = F.dropout(x, p=0.6, training=self.training) - x = F.elu(self.conv1(x)) - x = F.dropout(x, p=0.6, training=self.training) - x = self.conv2(x) - return F.log_softmax(x, dim=1) diff --git a/pytorch_geometric-2.3.1/benchmark/runtime/dgl/gcn.py b/pytorch_geometric-2.3.1/benchmark/runtime/dgl/gcn.py deleted file mode 100644 index 985caa7..0000000 --- a/pytorch_geometric-2.3.1/benchmark/runtime/dgl/gcn.py +++ /dev/null @@ -1,80 +0,0 @@ -import dgl.function as fn -import torch -import torch.nn.functional as F -from torch.nn import Parameter - -from torch_geometric.nn.inits import glorot, zeros - - -class GCNConv(torch.nn.Module): - def __init__(self, g, in_channels, out_channels): - super().__init__() - self.g = g - self.weight = Parameter(torch.Tensor(in_channels, out_channels)) - self.bias = Parameter(torch.Tensor(out_channels)) - self.reset_parameters() - - def reset_parameters(self): - glorot(self.weight) - zeros(self.bias) - - def gcn_msg(self, edge): - return {'m': edge.src['x'] * edge.src['norm']} - - def gcn_reduce(self, node): - return {'x': node.mailbox['m'].sum(dim=1) * node.data['norm']} - - def forward(self, x): - self.g.ndata['x'] = torch.matmul(x, self.weight) - self.g.update_all(self.gcn_msg, self.gcn_reduce) - x = self.g.ndata.pop('x') - x = x + self.bias - return x - - -class GCN(torch.nn.Module): - def __init__(self, g, in_channels, out_channels): - super().__init__() - self.conv1 = GCNConv(g, in_channels, 16) - self.conv2 = GCNConv(g, 16, out_channels) - - def forward(self, x): - x = F.relu(self.conv1(x)) - x = F.dropout(x, training=self.training) - x = self.conv2(x) - return F.log_softmax(x, dim=1) - - -class GCNSPMVConv(torch.nn.Module): - def __init__(self, g, in_channels, out_channels): - super().__init__() - self.g = g - 
self.weight = Parameter(torch.Tensor(in_channels, out_channels)) - self.bias = Parameter(torch.Tensor(out_channels)) - self.reset_parameters() - - def reset_parameters(self): - glorot(self.weight) - zeros(self.bias) - - def forward(self, x): - x = torch.matmul(x, self.weight) - self.g.ndata['x'] = x * self.g.ndata['norm'] - self.g.update_all(fn.copy_src(src='x', out='m'), - fn.sum(msg='m', out='x')) - x = self.g.ndata.pop('x') * self.g.ndata['norm'] - x = x + self.bias - return x - - -class GCNSPMV(torch.nn.Module): - def __init__(self, g, in_channels, out_channels): - super().__init__() - self.conv1 = GCNSPMVConv(g, in_channels, 16) - self.conv2 = GCNSPMVConv(g, 16, out_channels) - - def forward(self, x): - x = F.relu(self.conv1(x)) - x = F.dropout(x, training=self.training) - x = self.conv2(x) - return F.log_softmax(x, dim=1) diff --git a/pytorch_geometric-2.3.1/benchmark/runtime/dgl/main.py b/pytorch_geometric-2.3.1/benchmark/runtime/dgl/main.py deleted file mode 100644 index bf8dbbd..0000000 --- a/pytorch_geometric-2.3.1/benchmark/runtime/dgl/main.py +++ /dev/null @@ -1,52 +0,0 @@ -from itertools import product - -import dgl -import torch -from dgl import DGLGraph -from dgl.contrib.data import load_data -from dgl.data import citation_graph -from runtime.dgl.gat import GAT, GATSPMV -from runtime.dgl.gcn import GCN, GCNSPMV -from runtime.dgl.hidden import HiddenPrint -from runtime.dgl.rgcn import RGCN, RGCNSPMV -from runtime.dgl.train import train_runtime - -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') -with HiddenPrint(): - Cora = citation_graph.load_cora() - CiteSeer = citation_graph.load_citeseer() - PubMed = citation_graph.load_pubmed() - MUTAG = load_data('mutag') # fair comparison - -# One training run before we start tracking duration to warm up GPU. 
-g = DGLGraph(Cora.graph) -g.set_n_initializer(dgl.init.zero_initializer) -g.add_edges(g.nodes(), g.nodes()) -norm = torch.pow(g.in_degrees().float(), -0.5) -norm[torch.isinf(norm)] = 0 -g.ndata['norm'] = norm.unsqueeze(1).to(device) -model = GCNSPMV(g, Cora.features.shape[1], Cora.num_labels).to(device) -train_runtime(model, Cora, epochs=200, device=device) - -for d, Net in product([Cora, CiteSeer, PubMed], [GCN, GCNSPMV, GAT, GATSPMV]): - g = DGLGraph(d.graph) - g.set_n_initializer(dgl.init.zero_initializer) - g.add_edges(g.nodes(), g.nodes()) - norm = torch.pow(g.in_degrees().float(), -0.5) - norm[torch.isinf(norm)] = 0 - g.ndata['norm'] = norm.unsqueeze(1).to(device) - model = Net(g, d.features.shape[1], d.num_labels).to(device) - t = train_runtime(model, d, epochs=200, device=device) - print(f'{d.name} - {Net.__name__}: {t:.2f}s') - -for d, Net in product([MUTAG], [RGCN, RGCNSPMV]): - g = DGLGraph() - g.add_nodes(d.num_nodes) - g.add_edges(d.edge_src, d.edge_dst) - edge_type = torch.from_numpy(d.edge_type).to(device) - edge_norm = torch.from_numpy(d.edge_norm).to(device) - g.edata.update({'type': edge_type, 'norm': edge_norm}) - g.ndata['id'] = torch.arange(d.num_nodes, dtype=torch.long, device=device) - model = Net(g, d.num_nodes, d.num_classes, d.num_rels) - t = train_runtime(model, d, epochs=200, device=device) - print(f'{d.name} - {Net.__name__}: {t:.2f}s') diff --git a/pytorch_geometric-2.3.1/benchmark/runtime/dgl/rgcn.py b/pytorch_geometric-2.3.1/benchmark/runtime/dgl/rgcn.py deleted file mode 100644 index 4c7125d..0000000 --- a/pytorch_geometric-2.3.1/benchmark/runtime/dgl/rgcn.py +++ /dev/null @@ -1,149 +0,0 @@ -import dgl.function as fn -import torch -import torch.nn.functional as F -from torch.nn import Parameter as Param - -from torch_geometric.nn.inits import uniform - - -class RGCNConv(torch.nn.Module): - def __init__(self, g, in_channels, out_channels, num_relations, num_bases): - super().__init__() - - self.g = g - self.in_channels = in_channels - self.out_channels = out_channels - self.num_relations = num_relations - self.num_bases = num_bases - - self.basis = Param(torch.Tensor(num_bases, in_channels, out_channels)) - self.att = Param(torch.Tensor(num_relations, num_bases)) - self.root = Param(torch.Tensor(in_channels, out_channels)) - self.bias = Param(torch.Tensor(out_channels)) - - self.reset_parameters() - - def reset_parameters(self): - size = self.num_bases * self.in_channels - uniform(size, self.basis) - uniform(size, self.att) - uniform(size, self.root) - uniform(size, self.bias) - - def rgcn_reduce(self, node): - return {'x': node.mailbox['m'].sum(dim=1)} - - def forward(self, x): - self.w = torch.matmul(self.att, self.basis.view(self.num_bases, -1)) - self.w = self.w.view(self.num_relations, self.in_channels, - self.out_channels) - - if x is None: - - def msg_func(edge): - w = self.w.view(-1, self.out_channels) - index = edge.data['type'] * self.in_channels + edge.src['id'] - m = w.index_select(0, index) * edge.data['norm'].unsqueeze(1) - return {'m': m} - else: - self.g.ndata['x'] = x - - def msg_func(edge): - w = self.w.index_select(0, edge.data['type']) - m = torch.bmm(edge.src['x'].unsqueeze(1), w).squeeze() - m = m * edge.data['norm'].unsqueeze(1) - return {'m': m} - - self.g.update_all(msg_func, self.rgcn_reduce) - out = self.g.ndata.pop('x') - - if x is None: - out = out + self.root - else: - out = out + torch.matmul(x, self.root) - - out = out + self.bias - return out - - -class RGCN(torch.nn.Module): - def __init__(self, g, in_channels, 
out_channels, num_relations): - super().__init__() - self.conv1 = RGCNConv(g, in_channels, 16, num_relations, num_bases=30) - self.conv2 = RGCNConv(g, 16, out_channels, num_relations, num_bases=30) - - def forward(self, x): - x = F.relu(self.conv1(None)) - x = self.conv2(x) - return F.log_softmax(x, dim=1) - - -class RGCNSPMVConv(torch.nn.Module): - def __init__(self, g, in_channels, out_channels, num_relations, num_bases): - super().__init__() - - self.g = g - self.in_channels = in_channels - self.out_channels = out_channels - self.num_relations = num_relations - self.num_bases = num_bases - - self.basis = Param(torch.Tensor(num_bases, in_channels, out_channels)) - self.att = Param(torch.Tensor(num_relations, num_bases)) - self.root = Param(torch.Tensor(in_channels, out_channels)) - self.bias = Param(torch.Tensor(out_channels)) - - self.reset_parameters() - - def reset_parameters(self): - size = self.num_bases * self.in_channels - uniform(size, self.basis) - uniform(size, self.att) - uniform(size, self.root) - uniform(size, self.bias) - - def forward(self, x): - self.w = torch.matmul(self.att, self.basis.view(self.num_bases, -1)) - self.w = self.w.view(self.num_relations, self.in_channels, - self.out_channels) - - if x is None: - - def msg_func(edge): - w = self.w.view(-1, self.out_channels) - index = edge.data['type'] * self.in_channels + edge.src['id'] - m = w.index_select(0, index) * edge.data['norm'].unsqueeze(1) - return {'m': m} - else: - self.g.ndata['x'] = x - - def msg_func(edge): - w = self.w.index_select(0, edge.data['type']) - m = torch.bmm(edge.src['x'].unsqueeze(1), w).squeeze() - m = m * edge.data['norm'].unsqueeze(1) - return {'m': m} - - self.g.update_all(msg_func, fn.sum(msg='m', out='x')) - out = self.g.ndata.pop('x') - - if x is None: - out = out + self.root - else: - out = out + torch.matmul(x, self.root) - - out = out + self.bias - return out - - -class RGCNSPMV(torch.nn.Module): - def __init__(self, g, in_channels, out_channels, num_relations): - super().__init__() - self.conv1 = RGCNSPMVConv(g, in_channels, 16, num_relations, - num_bases=30) - self.conv2 = RGCNSPMVConv(g, 16, out_channels, num_relations, - num_bases=30) - - def forward(self, x): - x = F.relu(self.conv1(None)) - x = self.conv2(x) - return F.log_softmax(x, dim=1) diff --git a/pytorch_geometric-2.3.1/benchmark/runtime/dgl/train.py b/pytorch_geometric-2.3.1/benchmark/runtime/dgl/train.py deleted file mode 100644 index f74ecdf..0000000 --- a/pytorch_geometric-2.3.1/benchmark/runtime/dgl/train.py +++ /dev/null @@ -1,34 +0,0 @@ -import time - -import torch -import torch.nn.functional as F - - -def train_runtime(model, data, epochs, device): - if hasattr(data, 'features'): - x = torch.tensor(data.features, dtype=torch.float, device=device) - else: - x = None - mask = data.train_mask if hasattr(data, 'train_mask') else data.train_idx - y = torch.tensor(data.labels, dtype=torch.long, device=device)[mask] - - model = model.to(device) - model.train() - optimizer = torch.optim.Adam(model.parameters(), lr=0.01) - - if torch.cuda.is_available(): - torch.cuda.synchronize() - t_start = time.perf_counter() - - for epoch in range(epochs): - optimizer.zero_grad() - out = model(x) - loss = F.nll_loss(out[mask], y.view(-1)) - loss.backward() - optimizer.step() - - if torch.cuda.is_available(): - torch.cuda.synchronize() - t_end = time.perf_counter() - - return t_end - t_start diff --git a/pytorch_geometric-2.3.1/benchmark/runtime/main.py b/pytorch_geometric-2.3.1/benchmark/runtime/main.py deleted file mode 100644 index 
798a22f..0000000 --- a/pytorch_geometric-2.3.1/benchmark/runtime/main.py +++ /dev/null @@ -1,31 +0,0 @@ -import os.path as osp -from itertools import product - -import torch -from runtime.gat import GAT -from runtime.gcn import GCN -from runtime.rgcn import RGCN -from runtime.train import train_runtime - -from torch_geometric.datasets import Entities, Planetoid - -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') -root = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data') -Cora = Planetoid(osp.join(root, 'Cora'), 'Cora') -CiteSeer = Planetoid(osp.join(root, 'CiteSeer'), 'CiteSeer') -PubMed = Planetoid(osp.join(root, 'PubMed'), 'PubMed') -MUTAG = Entities(osp.join(root, 'EntitiesMUTAG'), 'MUTAG') - -# One training run before we start tracking duration to warm up GPU. -model = GCN(Cora.num_features, Cora.num_classes) -train_runtime(model, Cora[0], epochs=200, device=device) - -for d, Net in product([Cora, CiteSeer, PubMed], [GCN, GAT]): - model = Net(d.num_features, d.num_classes) - t = train_runtime(model, d[0], epochs=200, device=device) - print(f'{str(d)[:-2]} - {Net.__name__}: {t:.2f}s') - -for d, Net in product([MUTAG], [RGCN]): - model = Net(d[0].num_nodes, d.num_classes, d.num_relations) - t = train_runtime(model, d[0], epochs=200, device=device) - print(f'{str(d)[:-2]} - {Net.__name__}: {t:.2f}s') diff --git a/pytorch_geometric-2.3.1/benchmark/runtime/train.py b/pytorch_geometric-2.3.1/benchmark/runtime/train.py deleted file mode 100644 index bd99519..0000000 --- a/pytorch_geometric-2.3.1/benchmark/runtime/train.py +++ /dev/null @@ -1,30 +0,0 @@ -import time - -import torch -import torch.nn.functional as F - - -def train_runtime(model, data, epochs, device): - optimizer = torch.optim.Adam(model.parameters(), lr=0.01) - model = model.to(device) - data = data.to(device) - model.train() - mask = data.train_mask if 'train_mask' in data else data.train_idx - y = data.y[mask] if 'train_mask' in data else data.train_y - - if torch.cuda.is_available(): - torch.cuda.synchronize() - t_start = time.perf_counter() - - for epoch in range(epochs): - optimizer.zero_grad() - out = model(data) - loss = F.nll_loss(out[mask], y) - loss.backward() - optimizer.step() - - if torch.cuda.is_available(): - torch.cuda.synchronize() - t_end = time.perf_counter() - - return t_end - t_start diff --git a/pytorch_geometric-2.3.1/benchmark/utils/__init__.py b/pytorch_geometric-2.3.1/benchmark/utils/__init__.py deleted file mode 100644 index 376545b..0000000 --- a/pytorch_geometric-2.3.1/benchmark/utils/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -from .utils import emit_itt -from .utils import get_dataset, get_dataset_with_transformation -from .utils import get_model -from .utils import get_split_masks -from .utils import save_benchmark_data, write_to_csv - -__all__ = [ - 'emit_itt', - 'get_dataset', - 'get_dataset_with_transformation', - 'get_model', - 'get_split_masks', - 'save_benchmark_data', - 'write_to_csv', -] diff --git a/pytorch_geometric-2.3.1/benchmark/utils/utils.py b/pytorch_geometric-2.3.1/benchmark/utils/utils.py deleted file mode 100644 index 27d8748..0000000 --- a/pytorch_geometric-2.3.1/benchmark/utils/utils.py +++ /dev/null @@ -1,147 +0,0 @@ -import os -import os.path as osp -from datetime import datetime - -import pandas as pd -import torch -from ogb.nodeproppred import PygNodePropPredDataset - -import torch_geometric.transforms as T -from torch_geometric.datasets import OGB_MAG, Reddit -from torch_geometric.nn import GAT, GCN, PNA, EdgeCNN, GraphSAGE -from 
torch_geometric.utils import index_to_mask - -from .hetero_gat import HeteroGAT -from .hetero_sage import HeteroGraphSAGE - -try: - from torch.autograd.profiler import emit_itt -except ImportError: - from contextlib import contextmanager - - @contextmanager - def emit_itt(*args, **kwargs): - yield - - -models_dict = { - 'edge_cnn': EdgeCNN, - 'gat': GAT, - 'gcn': GCN, - 'pna': PNA, - 'sage': GraphSAGE, - 'rgat': HeteroGAT, - 'rgcn': HeteroGraphSAGE, -} - - -def get_dataset_with_transformation(name, root, use_sparse_tensor=False, - bf16=False): - path = osp.join(osp.dirname(osp.realpath(__file__)), root, name) - transform = T.ToSparseTensor( - remove_edge_index=False) if use_sparse_tensor else None - if name == 'ogbn-mag': - if transform is None: - transform = T.ToUndirected(merge=True) - else: - transform = T.Compose([T.ToUndirected(merge=True), transform]) - dataset = OGB_MAG(root=path, preprocess='metapath2vec', - transform=transform) - elif name == 'ogbn-products': - if transform is None: - transform = T.RemoveDuplicatedEdges() - else: - transform = T.Compose([T.RemoveDuplicatedEdges(), transform]) - - dataset = PygNodePropPredDataset('ogbn-products', root=path, - transform=transform) - - elif name == 'Reddit': - dataset = Reddit(root=path, transform=transform) - - data = dataset[0] - - if name == 'ogbn-products': - split_idx = dataset.get_idx_split() - data.train_mask = index_to_mask(split_idx['train'], - size=data.num_nodes) - data.val_mask = index_to_mask(split_idx['valid'], size=data.num_nodes) - data.test_mask = index_to_mask(split_idx['test'], size=data.num_nodes) - data.y = data.y.squeeze() - - if bf16: - data.x = data.x.to(torch.bfloat16) - - return data, dataset.num_classes, transform - - -def get_dataset(name, root, use_sparse_tensor=False, bf16=False): - data, num_classes, _ = get_dataset_with_transformation( - name, root, use_sparse_tensor, bf16) - return data, num_classes - - -def get_model(name, params, metadata=None): - Model = models_dict.get(name, None) - assert Model is not None, f'Model {name} not supported!' 
- - if name == 'rgat': - return Model(metadata, params['hidden_channels'], params['num_layers'], - params['output_channels'], params['num_heads']) - - if name == 'rgcn': - return Model(metadata, params['hidden_channels'], params['num_layers'], - params['output_channels']) - - if name == 'gat': - return Model(params['inputs_channels'], params['hidden_channels'], - params['num_layers'], params['output_channels'], - heads=params['num_heads']) - - if name == 'pna': - return Model(params['inputs_channels'], params['hidden_channels'], - params['num_layers'], params['output_channels'], - aggregators=['mean', 'min', 'max', 'std'], - scalers=['identity', 'amplification', - 'attenuation'], deg=params['degree']) - - return Model(params['inputs_channels'], params['hidden_channels'], - params['num_layers'], params['output_channels']) - - -def get_split_masks(data, dataset_name): - if dataset_name == 'ogbn-mag': - train_mask = ('paper', data['paper'].train_mask) - test_mask = ('paper', data['paper'].test_mask) - val_mask = ('paper', data['paper'].val_mask) - else: - train_mask = data.train_mask - val_mask = data.val_mask - test_mask = data.test_mask - return train_mask, val_mask, test_mask - - -def save_benchmark_data(csv_data, batch_size, layers, num_neighbors, - hidden_channels, total_time, model_name, dataset_name, - use_sparse_tensor): - config = f'Batch size={batch_size}, ' \ - f'#Layers={layers}, ' \ - f'#Neighbors={num_neighbors}, ' \ - f'#Hidden features={hidden_channels}' - csv_data['DATE'].append(datetime.now().date()) - csv_data['TIME (s)'].append(round(total_time, 2)) - csv_data['MODEL'].append(model_name) - csv_data['DATASET'].append(dataset_name) - csv_data['CONFIG'].append(config) - csv_data['SPARSE'].append(use_sparse_tensor) - - -def write_to_csv(csv_data, training=False): - results_path = osp.join(osp.dirname(osp.realpath(__file__)), '../results/') - os.makedirs(results_path, exist_ok=True) - - name = 'training' if training else 'inference' - csv_path = osp.join(results_path, f'TOTAL_{name}_benchmark.csv') - with_header = not osp.exists(csv_path) - df = pd.DataFrame(csv_data) - df.to_csv(csv_path, mode='a', index_label='TEST_ID', header=with_header) diff --git a/pytorch_geometric-2.3.1/conda/pyg/README.md b/pytorch_geometric-2.3.1/conda/pyg/README.md deleted file mode 100644 index 6207e22..0000000 --- a/pytorch_geometric-2.3.1/conda/pyg/README.md +++ /dev/null @@ -1,3 +0,0 @@ -``` -./build_conda.sh 3.9 2.0.0 cu117 # python, pytorch and cuda version -``` diff --git a/pytorch_geometric-2.3.1/conda/pyg/build_conda.sh b/pytorch_geometric-2.3.1/conda/pyg/build_conda.sh deleted file mode 100644 index 33ea72d..0000000 --- a/pytorch_geometric-2.3.1/conda/pyg/build_conda.sh +++ /dev/null @@ -1,56 +0,0 @@ -#!/bin/bash - -export PYTHON_VERSION=$1 -export TORCH_VERSION=$2 -export CUDA_VERSION=$3 - -export CONDA_PYTORCH_CONSTRAINT="pytorch==${TORCH_VERSION%.*}.*" - -if [ "${CUDA_VERSION}" = "cpu" ]; then - export CONDA_CUDATOOLKIT_CONSTRAINT="cpuonly # [not osx]" -else - case $CUDA_VERSION in - cu118) - export CONDA_CUDATOOLKIT_CONSTRAINT="pytorch-cuda==11.8.*" - ;; - cu117) - export CONDA_CUDATOOLKIT_CONSTRAINT="pytorch-cuda==11.7.*" - ;; - cu116) - if [ "${TORCH_VERSION}" = "1.12.0" ]; then - export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit==11.6.*" - else - export CONDA_CUDATOOLKIT_CONSTRAINT="pytorch-cuda==11.6.*" - fi - ;; - cu115) - export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit==11.5.*" - ;; - cu113) - export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit==11.3.*" - ;; - cu111) - export 
CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit==11.1.*" - ;; - cu102) - export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit==10.2.*" - ;; - cu101) - export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit==10.1.*" - ;; - *) - echo "Unrecognized CUDA_VERSION=$CUDA_VERSION" - exit 1 - ;; - esac -fi - -echo "PyTorch $TORCH_VERSION+$CUDA_VERSION" -echo "- $CONDA_PYTORCH_CONSTRAINT" -echo "- $CONDA_CUDATOOLKIT_CONSTRAINT" - -if [ "${TORCH_VERSION}" = "1.12.0" ] && [ "${CUDA_VERSION}" = "cu116" ]; then - conda build . -c pytorch -c pyg -c default -c nvidia -c conda-forge --output-folder "$HOME/conda-bld" -else - conda build . -c pytorch -c pyg -c default -c nvidia --output-folder "$HOME/conda-bld" -fi diff --git a/pytorch_geometric-2.3.1/conda/pyg/meta.yaml b/pytorch_geometric-2.3.1/conda/pyg/meta.yaml deleted file mode 100644 index ad1f904..0000000 --- a/pytorch_geometric-2.3.1/conda/pyg/meta.yaml +++ /dev/null @@ -1,42 +0,0 @@ -package: - name: pyg - version: 2.3.0 - -source: - url: https://files.pythonhosted.org/packages/43/b5/be9795db7756e6c1fa2606c8145ec637552487e72c6428ed0b231f8bcbd3/torch_geometric-2.3.0.tar.gz - -requirements: - host: - - pip - - python {{ environ.get('PYTHON_VERSION') }} - - run: - - python {{ environ.get('PYTHON_VERSION') }} - - {{ environ.get('CONDA_PYTORCH_CONSTRAINT') }} - - {{ environ.get('CONDA_CUDATOOLKIT_CONSTRAINT') }} - - psutil - - tqdm - - jinja2 - - pyparsing - - numpy - - scipy - - requests - - scikit-learn - -build: - string: py{{ environ.get('PYTHON_VERSION').replace('.', '') }}_torch_{{ environ['TORCH_VERSION'] }}_{{ environ['CUDA_VERSION'] }} - script: pip install . - -test: - imports: - - torch_geometric - - torch_geometric.nn - - torch_geometric.data - - torch_geometric.utils - - torch_geometric.datasets - - torch_geometric.transforms - -about: - home: https://github.com/pyg-team/pytorch_geometric - license: MIT - summary: Graph Neural Network Library for PyTorch diff --git a/pytorch_geometric-2.3.1/conda/pytorch-geometric/README.md b/pytorch_geometric-2.3.1/conda/pytorch-geometric/README.md deleted file mode 100644 index 6207e22..0000000 --- a/pytorch_geometric-2.3.1/conda/pytorch-geometric/README.md +++ /dev/null @@ -1,3 +0,0 @@ -``` -./build_conda.sh 3.9 2.0.0 cu117 # python, pytorch and cuda version -``` diff --git a/pytorch_geometric-2.3.1/conda/pytorch-geometric/build_conda.sh b/pytorch_geometric-2.3.1/conda/pytorch-geometric/build_conda.sh deleted file mode 100644 index fcddcd0..0000000 --- a/pytorch_geometric-2.3.1/conda/pytorch-geometric/build_conda.sh +++ /dev/null @@ -1,56 +0,0 @@ -#!/bin/bash - -export PYTHON_VERSION=$1 -export TORCH_VERSION=$2 -export CUDA_VERSION=$3 - -export CONDA_PYTORCH_CONSTRAINT="pytorch==${TORCH_VERSION%.*}.*" - -if [ "${CUDA_VERSION}" = "cpu" ]; then - export CONDA_CUDATOOLKIT_CONSTRAINT="cpuonly # [not osx]" -else - case $CUDA_VERSION in - cu118) - export CONDA_CUDATOOLKIT_CONSTRAINT="pytorch-cuda==11.8.*" - ;; - cu117) - export CONDA_CUDATOOLKIT_CONSTRAINT="pytorch-cuda==11.7.*" - ;; - cu116) - if [ "${TORCH_VERSION}" = "1.12.0" ]; then - export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit==11.6.*" - else - export CONDA_CUDATOOLKIT_CONSTRAINT="pytorch-cuda==11.6.*" - fi - ;; - cu115) - export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit==11.5.*" - ;; - cu113) - export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit==11.3.*" - ;; - cu111) - export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit==11.1.*" - ;; - cu102) - export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit==10.2.*" - ;; - cu101) - export 
CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit==10.1.*"
-      ;;
-    *)
-      echo "Unrecognized CUDA_VERSION=$CUDA_VERSION"
-      exit 1
-      ;;
-  esac
-fi
-
-echo "PyTorch $TORCH_VERSION+$CUDA_VERSION"
-echo "- $CONDA_PYTORCH_CONSTRAINT"
-echo "- $CONDA_CUDATOOLKIT_CONSTRAINT"
-
-if [ "${TORCH_VERSION}" = "1.12.0" ] && [ "${CUDA_VERSION}" = "cu116" ]; then
-  conda build . -c pytorch -c rusty1s -c default -c nvidia -c conda-forge --output-folder "$HOME/conda-bld"
-else
-  conda build . -c pytorch -c rusty1s -c default -c nvidia --output-folder "$HOME/conda-bld"
-fi
diff --git a/pytorch_geometric-2.3.1/conda/pytorch-geometric/meta.yaml b/pytorch_geometric-2.3.1/conda/pytorch-geometric/meta.yaml
deleted file mode 100644
index a93e0b6..0000000
--- a/pytorch_geometric-2.3.1/conda/pytorch-geometric/meta.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-package:
-  name: pytorch-geometric
-  version: 2.3.0
-
-source:
-  url: https://files.pythonhosted.org/packages/43/b5/be9795db7756e6c1fa2606c8145ec637552487e72c6428ed0b231f8bcbd3/torch_geometric-2.3.0.tar.gz
-
-requirements:
-  host:
-    - pip
-    - python {{ environ.get('PYTHON_VERSION') }}
-
-  run:
-    - python {{ environ.get('PYTHON_VERSION') }}
-    - {{ environ.get('CONDA_PYTORCH_CONSTRAINT') }}
-    - {{ environ.get('CONDA_CUDATOOLKIT_CONSTRAINT') }}
-    - psutil
-    - tqdm
-    - jinja2
-    - pyparsing
-    - numpy
-    - scipy
-    - requests
-    - scikit-learn
-
-build:
-  string: py{{ environ.get('PYTHON_VERSION').replace('.', '') }}_torch_{{ environ['TORCH_VERSION'] }}_{{ environ['CUDA_VERSION'] }}
-  script: pip install .
-
-test:
-  imports:
-    - torch_geometric
-    - torch_geometric.nn
-    - torch_geometric.data
-    - torch_geometric.utils
-    - torch_geometric.datasets
-    - torch_geometric.transforms
-
-about:
-  home: https://github.com/pyg-team/pytorch_geometric
-  license: MIT
-  summary: Graph Neural Network Library for PyTorch
diff --git a/pytorch_geometric-2.3.1/docs/requirements.txt b/pytorch_geometric-2.3.1/docs/requirements.txt
deleted file mode 100644
index f29171c..0000000
--- a/pytorch_geometric-2.3.1/docs/requirements.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-https://download.pytorch.org/whl/cpu/torch-1.9.0%2Bcpu-cp38-cp38-linux_x86_64.whl
-numpy>=1.19.5
-git+https://github.com/pyg-team/pyg_sphinx_theme.git
diff --git a/pytorch_geometric-2.3.1/docs/source/advanced/batching.rst b/pytorch_geometric-2.3.1/docs/source/advanced/batching.rst
deleted file mode 100644
index 8a55de4..0000000
--- a/pytorch_geometric-2.3.1/docs/source/advanced/batching.rst
+++ /dev/null
@@ -1,233 +0,0 @@
-Advanced Mini-Batching
-======================
-
-The creation of mini-batching is crucial for letting the training of a deep learning model scale to huge amounts of data.
-Instead of processing examples one-by-one, a mini-batch groups a set of examples into a unified representation where it can efficiently be processed in parallel.
-In the image or language domain, this procedure is typically achieved by rescaling or padding each example into a set of equally-sized shapes, and examples are then grouped in an additional dimension.
-The length of this dimension is then equal to the number of examples grouped in a mini-batch and is typically referred to as the :obj:`batch_size`.
-
-Since graphs are one of the most general data structures that can hold *any* number of nodes or edges, the two approaches described above are either not feasible or may result in a lot of unnecessary memory consumption.
-In :pyg:`PyG`, we opt for another approach to achieve parallelization across a number of examples.
-Here, adjacency matrices are stacked in a diagonal fashion (creating a giant graph that holds multiple isolated subgraphs), and node and target features are simply concatenated in the node dimension, *i.e.*
-
-.. math::
-
-    \mathbf{A} = \begin{bmatrix} \mathbf{A}_1 & & \\ & \ddots & \\ & & \mathbf{A}_n \end{bmatrix}, \qquad \mathbf{X} = \begin{bmatrix} \mathbf{X}_1 \\ \vdots \\ \mathbf{X}_n \end{bmatrix}, \qquad \mathbf{Y} = \begin{bmatrix} \mathbf{Y}_1 \\ \vdots \\ \mathbf{Y}_n \end{bmatrix}.
-
-This procedure has some crucial advantages over other batching procedures:
-
-1. GNN operators that rely on a message passing scheme do not need to be modified since messages are not exchanged between two nodes that belong to different graphs.
-
-2. There is no computational or memory overhead.
-   For example, this batching procedure works completely without any padding of node or edge features.
-   Note that there is no additional memory overhead for adjacency matrices since they are saved in a sparse fashion holding only non-zero entries, *i.e.*, the edges.
-
-:pyg:`PyG` automatically takes care of batching multiple graphs into a single giant graph with the help of the :class:`torch_geometric.loader.DataLoader` class.
-Internally, :class:`~torch_geometric.loader.DataLoader` is just a regular :pytorch:`PyTorch` :class:`torch.utils.data.DataLoader` that overwrites its :func:`collate` functionality, *i.e.*, the definition of how a list of examples should be grouped together.
-Therefore, all arguments that can be passed to a :pytorch:`PyTorch` :class:`~torch.utils.data.DataLoader` can also be passed to a :pyg:`PyG` :class:`~torch_geometric.loader.DataLoader`, *e.g.*, the number of workers :obj:`num_workers`.
-
-In its most general form, the :pyg:`PyG` :class:`~torch_geometric.loader.DataLoader` will automatically increment the :obj:`edge_index` tensor by the cumulated number of nodes of all graphs that got collated before the currently processed graph, and will concatenate :obj:`edge_index` tensors (that are of shape :obj:`[2, num_edges]`) in the second dimension.
-The same is true for :obj:`face` tensors, *i.e.*, face indices in meshes.
-All other tensors will just get concatenated in the first dimension without any further incrementing of their values.
-
-However, there are a few special use-cases (as outlined below) where the user actively wants to modify this behavior to their own needs.
-:pyg:`PyG` allows modification to the underlying batching procedure by overwriting the :func:`torch_geometric.data.Data.__inc__` and :func:`torch_geometric.data.Data.__cat_dim__` functionalities.
-Without any modifications, these are defined as follows in the :class:`~torch_geometric.data.Data` class:
-
-.. code-block:: python
-
-    def __inc__(self, key, value, *args, **kwargs):
-        if 'index' in key or 'face' in key:
-            return self.num_nodes
-        else:
-            return 0
-
-    def __cat_dim__(self, key, value, *args, **kwargs):
-        if 'index' in key or 'face' in key:
-            return 1
-        else:
-            return 0
-
-We can see that :meth:`__inc__` defines the incremental count between two consecutive graph attributes, whereas :meth:`__cat_dim__` defines in which dimension graph tensors of the same attribute should be concatenated together.
-Both functions are called for each attribute stored in the :class:`~torch_geometric.data.Data` class, and get passed their specific :obj:`key` and value :obj:`item` as arguments.
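To make these defaults concrete: collating two graphs with the stock :class:`~torch_geometric.data.Data` class offsets the second graph's :obj:`edge_index` by the node count of the first and concatenates all other attributes along dimension :obj:`0`. A minimal sketch (graph sizes and feature dimensions chosen purely for illustration):

.. code-block:: python

    import torch
    from torch_geometric.data import Data
    from torch_geometric.loader import DataLoader

    # A directed triangle: 3 nodes, 3 edges, 8 features per node.
    data = Data(
        x=torch.randn(3, 8),
        edge_index=torch.tensor([[0, 1, 2], [1, 2, 0]]),
    )

    loader = DataLoader([data, data], batch_size=2)
    batch = next(iter(loader))

    print(batch.x.size())
    >>> torch.Size([6, 8])  # concatenated along dimension 0

    print(batch.edge_index)  # second graph's indices offset by num_nodes = 3
    >>> tensor([[0, 1, 2, 3, 4, 5],
                [1, 2, 0, 4, 5, 3]])

    print(batch.batch)  # assignment vector mapping each node to its graph
    >>> tensor([0, 0, 0, 1, 1, 1])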
-
-In what follows, we present a few use-cases where the modification of :func:`__inc__` and :func:`__cat_dim__` might be absolutely necessary.
-
-Pairs of Graphs
----------------
-
-In case you want to store multiple graphs in a single :class:`~torch_geometric.data.Data` object, *e.g.*, for applications such as graph matching, you need to ensure correct batching behavior across all those graphs.
-For example, consider storing two graphs, a source graph :math:`\mathcal{G}_s` and a target graph :math:`\mathcal{G}_t` in a :class:`~torch_geometric.data.Data`, *e.g.*:
-
-.. code-block:: python
-
-    from torch_geometric.data import Data
-
-    class PairData(Data):
-        def __init__(self, edge_index_s=None, x_s=None, edge_index_t=None, x_t=None):
-            super().__init__()
-            self.edge_index_s = edge_index_s
-            self.x_s = x_s
-            self.edge_index_t = edge_index_t
-            self.x_t = x_t
-
-In this case, :obj:`edge_index_s` should be increased by the number of nodes in the source graph :math:`\mathcal{G}_s`, *e.g.*, :obj:`x_s.size(0)`, and :obj:`edge_index_t` should be increased by the number of nodes in the target graph :math:`\mathcal{G}_t`, *e.g.*, :obj:`x_t.size(0)`:
-
-.. code-block:: python
-
-    def __inc__(self, key, value, *args, **kwargs):
-        if key == 'edge_index_s':
-            return self.x_s.size(0)
-        if key == 'edge_index_t':
-            return self.x_t.size(0)
-        else:
-            return super().__inc__(key, value, *args, **kwargs)
-
-We can test our :class:`PairData` batching behavior by setting up a simple test script:
-
-.. code-block:: python
-
-    import torch
-    from torch_geometric.loader import DataLoader
-
-    edge_index_s = torch.tensor([
-        [0, 0, 0, 0],
-        [1, 2, 3, 4],
-    ])
-    x_s = torch.randn(5, 16)  # 5 nodes.
-    edge_index_t = torch.tensor([
-        [0, 0, 0],
-        [1, 2, 3],
-    ])
-    x_t = torch.randn(4, 16)  # 4 nodes.
-
-    data = PairData(edge_index_s, x_s, edge_index_t, x_t)
-    data_list = [data, data]
-    loader = DataLoader(data_list, batch_size=2)
-    batch = next(iter(loader))
-
-    print(batch)
-    >>> PairDataBatch(edge_index_s=[2, 8], x_s=[10, 16],
-                      edge_index_t=[2, 6], x_t=[8, 16])
-
-    print(batch.edge_index_s)
-    >>> tensor([[0, 0, 0, 0, 5, 5, 5, 5],
-                [1, 2, 3, 4, 6, 7, 8, 9]])
-
-    print(batch.edge_index_t)
-    >>> tensor([[0, 0, 0, 4, 4, 4],
-                [1, 2, 3, 5, 6, 7]])
-
-Everything looks good so far!
-:obj:`edge_index_s` and :obj:`edge_index_t` get correctly batched together, even when using different numbers of nodes for :math:`\mathcal{G}_s` and :math:`\mathcal{G}_t`.
-However, the :obj:`batch` attribute (that maps each node to its respective graph) is missing since :pyg:`PyG` fails to identify the actual graph in the :class:`PairData` object.
-That's where the :obj:`follow_batch` argument of the :class:`~torch_geometric.loader.DataLoader` comes into play.
-Here, we can specify for which attributes we want to maintain the batch information:
-
-.. code-block:: python
-
-    loader = DataLoader(data_list, batch_size=2, follow_batch=['x_s', 'x_t'])
-    batch = next(iter(loader))
-
-    print(batch)
-    >>> PairDataBatch(edge_index_s=[2, 8], x_s=[10, 16], x_s_batch=[10],
-                      edge_index_t=[2, 6], x_t=[8, 16], x_t_batch=[8])
-
-    print(batch.x_s_batch)
-    >>> tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
-
-    print(batch.x_t_batch)
-    >>> tensor([0, 0, 0, 0, 1, 1, 1, 1])
-
-As one can see, :obj:`follow_batch=['x_s', 'x_t']` now successfully creates assignment vectors called :obj:`x_s_batch` and :obj:`x_t_batch` for the node features :obj:`x_s` and :obj:`x_t`, respectively. 
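-
-These assignment vectors plug directly into :pyg:`PyG`'s pooling operators (a minimal sketch, reusing the :obj:`batch` object from above):
-
-.. code-block:: python
-
-    from torch_geometric.nn import global_mean_pool
-
-    # One pooled embedding per source graph and per target graph:
-    emb_s = global_mean_pool(batch.x_s, batch.x_s_batch)  # Shape: [2, 16]
-    emb_t = global_mean_pool(batch.x_t, batch.x_t_batch)  # Shape: [2, 16]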
-That is exactly what the sketch above demonstrates: the assignment vectors let us perform reduce operations, *e.g.*, global pooling, on multiple graphs in a single :class:`Batch` object.
-
-Bipartite Graphs
-----------------
-
-The adjacency matrix of a bipartite graph defines the relationship between nodes of two different node types.
-In general, the number of nodes for each node type does not need to match, resulting in a non-square adjacency matrix :math:`\mathbf{A} \in \{ 0, 1 \}^{N \times M}`, where potentially :math:`N \neq M`.
-In a mini-batching procedure of bipartite graphs, the source nodes of edges in :obj:`edge_index` should get incremented differently than the target nodes of edges in :obj:`edge_index`.
-To achieve this, consider a bipartite graph between two node types with corresponding node features :obj:`x_s` and :obj:`x_t`, respectively:
-
-.. code-block:: python
-
-    from torch_geometric.data import Data
-
-    class BipartiteData(Data):
-        def __init__(self, edge_index=None, x_s=None, x_t=None):
-            super().__init__()
-            self.edge_index = edge_index
-            self.x_s = x_s
-            self.x_t = x_t
-
-For a correct mini-batching procedure in bipartite graphs, we need to tell :pyg:`PyG` that it should increment source and target nodes of edges in :obj:`edge_index` independently of each other:
-
-.. code-block:: python
-
-    def __inc__(self, key, value, *args, **kwargs):
-        if key == 'edge_index':
-            return torch.tensor([[self.x_s.size(0)], [self.x_t.size(0)]])
-        else:
-            return super().__inc__(key, value, *args, **kwargs)
-
-Here, :obj:`edge_index[0]` (the source nodes of edges) gets incremented by :obj:`x_s.size(0)` while :obj:`edge_index[1]` (the target nodes of edges) gets incremented by :obj:`x_t.size(0)`.
-We can again test our implementation by running a simple test script:
-
-.. code-block:: python
-
-    import torch
-    from torch_geometric.loader import DataLoader
-
-    edge_index = torch.tensor([
-        [0, 0, 1, 1],
-        [0, 1, 1, 2],
-    ])
-    x_s = torch.randn(2, 16)  # 2 nodes.
-    x_t = torch.randn(3, 16)  # 3 nodes.
-
-    data = BipartiteData(edge_index, x_s, x_t)
-    data_list = [data, data]
-    loader = DataLoader(data_list, batch_size=2)
-    batch = next(iter(loader))
-
-    print(batch)
-    >>> BipartiteDataBatch(edge_index=[2, 8], x_s=[4, 16], x_t=[6, 16])
-
-    print(batch.edge_index)
-    >>> tensor([[0, 0, 1, 1, 2, 2, 3, 3],
-                [0, 1, 1, 2, 3, 4, 4, 5]])
-
-Again, this is exactly the behavior we aimed for!
-
-Batching Along New Dimensions
------------------------------
-
-Sometimes, attributes of :obj:`data` objects should be batched by gaining a new batch dimension (as in classical mini-batching), *e.g.*, for graph-level properties or targets.
-Specifically, a list of attributes of shape :obj:`[num_features]` should be returned as :obj:`[num_examples, num_features]` rather than :obj:`[num_examples * num_features]`.
-:pyg:`PyG` achieves this by returning a concatenation dimension of :obj:`None` in :meth:`~torch_geometric.data.Data.__cat_dim__`:
-
-.. 
code-block:: python
-
-    import torch
-    from torch_geometric.data import Data
-    from torch_geometric.loader import DataLoader
-
-    class MyData(Data):
-        def __cat_dim__(self, key, value, *args, **kwargs):
-            if key == 'foo':
-                return None
-            else:
-                return super().__cat_dim__(key, value, *args, **kwargs)
-
-    edge_index = torch.tensor([
-        [0, 1, 1, 2],
-        [1, 0, 2, 1],
-    ])
-    foo = torch.randn(16)
-
-    data = MyData(edge_index=edge_index, foo=foo)
-    data_list = [data, data]
-    loader = DataLoader(data_list, batch_size=2)
-    batch = next(iter(loader))
-
-    print(batch)
-    >>> MyDataBatch(edge_index=[2, 8], foo=[2, 16])
-
-As desired, :obj:`batch.foo` is now described by two dimensions: the batch dimension and the feature dimension.
diff --git a/pytorch_geometric-2.3.1/docs/source/cheatsheet/data_cheatsheet.rst b/pytorch_geometric-2.3.1/docs/source/cheatsheet/data_cheatsheet.rst
deleted file mode 100644
index 910ec7c..0000000
--- a/pytorch_geometric-2.3.1/docs/source/cheatsheet/data_cheatsheet.rst
+++ /dev/null
@@ -1,35 +0,0 @@
-Dataset Cheatsheet
-==================
-
-.. note::
-
-    This dataset statistics table is a **work in progress**.
-    Please consider helping us fill in its content by providing statistics for individual datasets.
-    See `here `__ and `here `__ for examples on how to do so.
-
-.. list-table::
-    :widths: 50 10 10 10 10 10
-    :header-rows: 1
-
-    * - Name
-      - #graphs
-      - #nodes
-      - #edges
-      - #features
-      - #classes/#tasks
-{% for cls in torch_geometric.datasets.classes %}
-    * - :class:`~torch_geometric.datasets.{{ cls }}` {% if torch_geometric.datasets.utils.paper_link(cls) %}(`Paper <{{ torch_geometric.datasets.utils.paper_link(cls) }}>`__){% endif %}
-      - {%if torch_geometric.datasets.utils.has_stats(cls) %}{{ torch_geometric.datasets.utils.get_stat(cls, '#graphs', default=1) }}{% else %}{{ torch_geometric.datasets.utils.get_stat(cls, '#graphs', default='') }}{% endif %}
-      - {{ torch_geometric.datasets.utils.get_stat(cls, '#nodes', default='') }}
-      - {{ torch_geometric.datasets.utils.get_stat(cls, '#edges', default='') }}
-      - {{ torch_geometric.datasets.utils.get_stat(cls, '#features', default='') }}
-      - {{ torch_geometric.datasets.utils.get_stat(cls, '#classes', default='') }}{{ torch_geometric.datasets.utils.get_stat(cls, '#tasks', default='') }}
-    {% for child in torch_geometric.datasets.utils.get_children(cls) %}
-    * - └─ {{ child }}
-      - {{ torch_geometric.datasets.utils.get_stat(cls, '#graphs', child, default=1) }}
-      - {{ torch_geometric.datasets.utils.get_stat(cls, '#nodes', child, default='') }}
-      - {{ torch_geometric.datasets.utils.get_stat(cls, '#edges', child, default='') }}
-      - {{ torch_geometric.datasets.utils.get_stat(cls, '#features', child, default='') }}
-      - {{ torch_geometric.datasets.utils.get_stat(cls, '#classes', child, default='') }}{{ torch_geometric.datasets.utils.get_stat(cls, '#tasks', child, default='') }}
-    {% endfor %}
-{% endfor %}
diff --git a/pytorch_geometric-2.3.1/docs/source/conf.py b/pytorch_geometric-2.3.1/docs/source/conf.py
deleted file mode 100644
index d617ad4..0000000
--- a/pytorch_geometric-2.3.1/docs/source/conf.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import datetime
-import os.path as osp
-import sys
-
-import pyg_sphinx_theme
-
-import torch_geometric
-
-author = 'PyG Team'
-project = 'pytorch_geometric'
-version = torch_geometric.__version__
-copyright = f'{datetime.datetime.now().year}, {author}'
-
-sys.path.append(osp.join(osp.dirname(pyg_sphinx_theme.__file__), 'extension'))
-
-extensions = [
-    'sphinx.ext.autodoc',
-    'sphinx.ext.autosummary',
-    
'sphinx.ext.intersphinx', - 'sphinx.ext.mathjax', - 'sphinx.ext.napoleon', - 'sphinx.ext.viewcode', - 'pyg', -] - -html_theme = 'pyg_sphinx_theme' -html_logo = ('https://raw.githubusercontent.com/pyg-team/pyg_sphinx_theme/' - 'master/pyg_sphinx_theme/static/img/pyg_logo.png') -html_favicon = ('https://raw.githubusercontent.com/pyg-team/pyg_sphinx_theme/' - 'master/pyg_sphinx_theme/static/img/favicon.png') -html_static_path = ['_static'] -templates_path = ['_templates'] - -add_module_names = False -autodoc_member_order = 'bysource' - -suppress_warnings = ['autodoc.import_object'] - -intersphinx_mapping = { - 'python': ('https://docs.python.org/', None), - 'numpy': ('http://docs.scipy.org/doc/numpy', None), - 'pandas': ('http://pandas.pydata.org/pandas-docs/dev', None), - 'torch': ('https://pytorch.org/docs/master', None), -} - - -def setup(app): - def rst_jinja_render(app, _, source): - rst_context = {'torch_geometric': torch_geometric} - source[0] = app.builder.templates.render_string(source[0], rst_context) - - app.connect('source-read', rst_jinja_render) - app.add_js_file('js/version_alert.js') diff --git a/pytorch_geometric-2.3.1/docs/source/modules/datasets.rst b/pytorch_geometric-2.3.1/docs/source/modules/datasets.rst deleted file mode 100644 index c2bf28e..0000000 --- a/pytorch_geometric-2.3.1/docs/source/modules/datasets.rst +++ /dev/null @@ -1,44 +0,0 @@ -torch_geometric.datasets -======================== - -Benchmark Datasets ------------------- - -.. currentmodule:: torch_geometric.datasets - -.. autosummary:: - :nosignatures: - :toctree: ../generated - :template: autosummary/only_class.rst - - {% for name in torch_geometric.datasets.classes %} - {{ name }} - {% endfor %} - -Graph Generators ----------------- - -.. currentmodule:: torch_geometric.datasets.graph_generator - -.. autosummary:: - :nosignatures: - :toctree: ../generated - :template: autosummary/only_class.rst - - {% for name in torch_geometric.datasets.graph_generator.classes %} - {{ name }} - {% endfor %} - -Motif Generators ----------------- - -.. currentmodule:: torch_geometric.datasets.motif_generator - -.. autosummary:: - :nosignatures: - :toctree: ../generated - :template: autosummary/only_class.rst - - {% for name in torch_geometric.datasets.motif_generator.classes %} - {{ name }} - {% endfor %} diff --git a/pytorch_geometric-2.3.1/docs/source/notes/data_cheatsheet.rst b/pytorch_geometric-2.3.1/docs/source/notes/data_cheatsheet.rst deleted file mode 100644 index 723d2f6..0000000 --- a/pytorch_geometric-2.3.1/docs/source/notes/data_cheatsheet.rst +++ /dev/null @@ -1,37 +0,0 @@ -:orphan: - -Dataset Cheatsheet -================== - -.. note:: - - This dataset statistics table is a **work in progress**. - Please consider helping us filling its content by providing statistics for individual datasets. - See `here `__ and `here `__ for examples on how to do so. - -.. 
list-table::
-    :widths: 50 10 10 10 10 10
-    :header-rows: 1
-
-    * - Name
-      - #graphs
-      - #nodes
-      - #edges
-      - #features
-      - #classes/#tasks
-{% for cls in torch_geometric.datasets.classes %}
-    * - :class:`~torch_geometric.datasets.{{ cls }}` {% if torch_geometric.datasets.utils.paper_link(cls) %}(`Paper <{{ torch_geometric.datasets.utils.paper_link(cls) }}>`__){% endif %}
-      - {%if torch_geometric.datasets.utils.has_stats(cls) %}{{ torch_geometric.datasets.utils.get_stat(cls, '#graphs', default=1) }}{% else %}{{ torch_geometric.datasets.utils.get_stat(cls, '#graphs', default='') }}{% endif %}
-      - {{ torch_geometric.datasets.utils.get_stat(cls, '#nodes', default='') }}
-      - {{ torch_geometric.datasets.utils.get_stat(cls, '#edges', default='') }}
-      - {{ torch_geometric.datasets.utils.get_stat(cls, '#features', default='') }}
-      - {{ torch_geometric.datasets.utils.get_stat(cls, '#classes', default='') }}{{ torch_geometric.datasets.utils.get_stat(cls, '#tasks', default='') }}
-    {% for child in torch_geometric.datasets.utils.get_children(cls) %}
-    * - └─ {{ child }}
-      - {{ torch_geometric.datasets.utils.get_stat(cls, '#graphs', child, default=1) }}
-      - {{ torch_geometric.datasets.utils.get_stat(cls, '#nodes', child, default='') }}
-      - {{ torch_geometric.datasets.utils.get_stat(cls, '#edges', child, default='') }}
-      - {{ torch_geometric.datasets.utils.get_stat(cls, '#features', child, default='') }}
-      - {{ torch_geometric.datasets.utils.get_stat(cls, '#classes', child, default='') }}{{ torch_geometric.datasets.utils.get_stat(cls, '#tasks', child, default='') }}
-    {% endfor %}
-{% endfor %}
diff --git a/pytorch_geometric-2.3.1/docs/source/tutorial/compile.rst b/pytorch_geometric-2.3.1/docs/source/tutorial/compile.rst
deleted file mode 100644
index 63b9fd5..0000000
--- a/pytorch_geometric-2.3.1/docs/source/tutorial/compile.rst
+++ /dev/null
@@ -1,135 +0,0 @@
-Compiled Graph Neural Networks
-==============================
-
-:meth:`torch.compile` is the latest method to speed up your :pytorch:`PyTorch` code in :obj:`torch >= 2.0.0`!
-:meth:`torch.compile` makes PyTorch code run faster by JIT-compiling it into optimized kernels, all while requiring minimal code changes.
-
-Under the hood, :meth:`torch.compile` captures :pytorch:`PyTorch` programs via :obj:`TorchDynamo`, canonicalizes over 2,000 :pytorch:`PyTorch` operators via :obj:`PrimTorch`, and finally generates fast code out of it across multiple accelerators and backends via the deep learning compiler :obj:`TorchInductor`.
-
-.. note::
-    See `here `__ for a general tutorial on how to leverage :meth:`torch.compile`, and `here `__ for a description of its interface.
-
-In this tutorial, we show how to optimize your custom :pyg:`PyG` model via :meth:`torch.compile`.
-
-Introducing :meth:`torch_geometric.compile`
--------------------------------------------
-
-By default, :meth:`torch.compile` struggles to optimize a custom :pyg:`PyG` model since its underlying :class:`~torch_geometric.nn.conv.MessagePassing` interface is JIT-unfriendly due to its generality.
-As such, in :pyg:`PyG 2.3`, we introduce :meth:`torch_geometric.compile`, a wrapper around :meth:`torch.compile` with the same signature.
-
-:meth:`torch_geometric.compile` applies further optimizations to make :pyg:`PyG` models more compiler-friendly.
-Specifically, it:
-
-#. Temporarily disables the usage of the extension packages :obj:`torch_scatter`, :obj:`torch_sparse` and :obj:`pyg_lib` during GNN execution workflows (since these are not *yet* directly optimizable by :pytorch:`PyTorch`). 
-   From :pyg:`PyG 2.3` onwards, these packages are purely optional and not required anymore for running :pyg:`PyG` models (but :obj:`pyg_lib` may be required for graph sampling routines).
-
-#. Converts all instances of :class:`~torch_geometric.nn.conv.MessagePassing` modules into their jittable instances (see :meth:`torch_geometric.nn.conv.MessagePassing.jittable`).
-
-Without these adjustments, :meth:`torch.compile` may currently fail to correctly optimize your :pyg:`PyG` model.
-We are working on fully relying on :meth:`torch.compile` for future releases.
-
-Basic Usage
------------
-
-Leveraging :meth:`torch_geometric.compile` is as simple as the usage of :meth:`torch.compile`.
-Once you have a :pyg:`PyG` model defined, simply wrap it with :meth:`torch_geometric.compile` to obtain its optimized version:
-
-.. code-block:: python
-
-    import torch_geometric
-    from torch_geometric.nn import GraphSAGE
-
-    model = GraphSAGE(in_channels, hidden_channels, num_layers, out_channels)
-    model = model.to(device)
-
-    model = torch_geometric.compile(model)
-
-and execute it as usual:
-
-.. code-block:: python
-
-    from torch_geometric.datasets import Planetoid
-
-    dataset = Planetoid(root, name="Cora")
-    data = dataset[0].to(device)
-
-    out = model(data.x, data.edge_index)
-
-We have incorporated multiple examples in :obj:`examples/compile` that further show the practical usage of :meth:`torch_geometric.compile`:
-
-#. `Node Classification `__ via :class:`~torch_geometric.nn.models.GCN`
-#. `Graph Classification `__ via :class:`~torch_geometric.nn.models.GIN`
-
-Note that :meth:`torch.compile(model, dynamic=True)` sadly does not yet work for :pyg:`PyG` models on :pytorch:`PyTorch 2.0`.
-While static compilation via :meth:`torch.compile(model, dynamic=False)` works fine, it will re-compile the model every time it sees an input with a different shape.
-That currently does not play nicely with the way :pyg:`PyG` performs mini-batching, and will hence lead to major slow-downs.
-We are working with the :pytorch:`PyTorch` team to fix this limitation (see `this `_ :github:`GitHub` issue).
-A temporary workaround is to utilize the :class:`torch_geometric.transforms.Pad` transformation to ensure that all inputs are of equal shape.
-
-If you notice that :meth:`~torch_geometric.compile` fails for a certain :pyg:`PyG` model, do not hesitate to reach out either on :github:`null` `GitHub `_ or :slack:`null` `Slack `_.
-We are very eager to improve :meth:`~torch_geometric.compile` support across the whole :pyg:`PyG` code base.
-
-Benchmark
----------
-
-:meth:`torch.compile` works **fantastically well** for many :pyg:`PyG` models.
-**Overall, we observe runtime improvements of up to nearly 300%.**
-
-Specifically, we benchmark :class:`~torch_geometric.nn.models.GCN`, :class:`~torch_geometric.nn.models.GraphSAGE` and :class:`~torch_geometric.nn.models.GIN`, and compare runtimes obtained from traditional eager mode and :meth:`torch_geometric.compile`.
-We use a synthetic graph with 10,000 nodes and 200,000 edges, and a hidden feature dimensionality of 64.
-We report runtimes over 500 optimization steps:
-
-.. 
list-table:: - :widths: 15 15 15 15 15 15 - :header-rows: 1 - - * - Model - - Mode - - Forward - - Backward - - Total - - Speedup - * - :class:`~torch_geometric.nn.models.GCN` - - Eager - - 2.6396s - - 2.1697s - - 4.8093s - - - * - :class:`~torch_geometric.nn.models.GCN` - - **Compiled** - - **1.1082s** - - **0.5896s** - - **1.6978s** - - **2.83x** - * - :class:`~torch_geometric.nn.models.GraphSAGE` - - Eager - - 1.6023s - - 1.6428s - - 3.2451s - - - * - :class:`~torch_geometric.nn.models.GraphSAGE` - - **Compiled** - - **0.7033s** - - **0.7465s** - - **1.4498s** - - **2.24x** - * - :class:`~torch_geometric.nn.models.GIN` - - Eager - - 1.6701s - - 1.6990s - - 3.3690s - - - * - :class:`~torch_geometric.nn.models.GIN` - - **Compiled** - - **0.7320s** - - **0.7407s** - - **1.4727s** - - **2.29x** - -To reproduce these results, run - -.. code-block:: console - - python test/nn/models/test_basic_gnn.py - -from the root folder of your checked out :pyg:`PyG` repository from :github:`GitHub`. diff --git a/pytorch_geometric-2.3.1/examples/arma.py b/pytorch_geometric-2.3.1/examples/arma.py deleted file mode 100644 index e6673b8..0000000 --- a/pytorch_geometric-2.3.1/examples/arma.py +++ /dev/null @@ -1,68 +0,0 @@ -import os.path as osp - -import torch -import torch.nn.functional as F - -import torch_geometric.transforms as T -from torch_geometric.datasets import Planetoid -from torch_geometric.nn import ARMAConv - -dataset = 'Cora' -path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', dataset) -dataset = Planetoid(path, dataset, transform=T.NormalizeFeatures()) -data = dataset[0] - - -class Net(torch.nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels): - super().__init__() - - self.conv1 = ARMAConv(in_channels, hidden_channels, num_stacks=3, - num_layers=2, shared_weights=True, dropout=0.25) - - self.conv2 = ARMAConv(hidden_channels, out_channels, num_stacks=3, - num_layers=2, shared_weights=True, dropout=0.25, - act=lambda x: x) - - def forward(self, x, edge_index): - x = F.dropout(x, training=self.training) - x = F.relu(self.conv1(x, edge_index)) - x = F.dropout(x, training=self.training) - x = self.conv2(x, edge_index) - return F.log_softmax(x, dim=1) - - -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') -model, data = Net(dataset.num_features, 16, - dataset.num_classes).to(device), data.to(device) -optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4) - - -def train(): - model.train() - optimizer.zero_grad() - out = model(data.x, data.edge_index) - loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask]) - loss.backward() - optimizer.step() - - -def test(): - model.eval() - out, accs = model(data.x, data.edge_index), [] - for _, mask in data('train_mask', 'val_mask', 'test_mask'): - pred = out[mask].argmax(1) - acc = pred.eq(data.y[mask]).sum().item() / mask.sum().item() - accs.append(acc) - return accs - - -best_val_acc = test_acc = 0 -for epoch in range(1, 401): - train() - train_acc, val_acc, tmp_test_acc = test() - if val_acc > best_val_acc: - best_val_acc = val_acc - test_acc = tmp_test_acc - print(f'Epoch: {epoch:03d}, Train: {train_acc:.4f}, ' - f'Val: {best_val_acc:.4f}, Test: {test_acc:.4f}') diff --git a/pytorch_geometric-2.3.1/examples/attentive_fp.py b/pytorch_geometric-2.3.1/examples/attentive_fp.py deleted file mode 100644 index ced6659..0000000 --- a/pytorch_geometric-2.3.1/examples/attentive_fp.py +++ /dev/null @@ -1,148 +0,0 @@ -import os.path as osp -from math import sqrt - -import torch 
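-# RDKit supplies the atom- and bond-level descriptors used below to build the
-# "Table 1" features of the AttentiveFP paper.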
-import torch.nn.functional as F -from rdkit import Chem - -from torch_geometric.datasets import MoleculeNet -from torch_geometric.loader import DataLoader -from torch_geometric.nn.models import AttentiveFP - - -class GenFeatures(object): - def __init__(self): - self.symbols = [ - 'B', 'C', 'N', 'O', 'F', 'Si', 'P', 'S', 'Cl', 'As', 'Se', 'Br', - 'Te', 'I', 'At', 'other' - ] - - self.hybridizations = [ - Chem.rdchem.HybridizationType.SP, - Chem.rdchem.HybridizationType.SP2, - Chem.rdchem.HybridizationType.SP3, - Chem.rdchem.HybridizationType.SP3D, - Chem.rdchem.HybridizationType.SP3D2, - 'other', - ] - - self.stereos = [ - Chem.rdchem.BondStereo.STEREONONE, - Chem.rdchem.BondStereo.STEREOANY, - Chem.rdchem.BondStereo.STEREOZ, - Chem.rdchem.BondStereo.STEREOE, - ] - - def __call__(self, data): - # Generate AttentiveFP features according to Table 1. - mol = Chem.MolFromSmiles(data.smiles) - - xs = [] - for atom in mol.GetAtoms(): - symbol = [0.] * len(self.symbols) - symbol[self.symbols.index(atom.GetSymbol())] = 1. - degree = [0.] * 6 - degree[atom.GetDegree()] = 1. - formal_charge = atom.GetFormalCharge() - radical_electrons = atom.GetNumRadicalElectrons() - hybridization = [0.] * len(self.hybridizations) - hybridization[self.hybridizations.index( - atom.GetHybridization())] = 1. - aromaticity = 1. if atom.GetIsAromatic() else 0. - hydrogens = [0.] * 5 - hydrogens[atom.GetTotalNumHs()] = 1. - chirality = 1. if atom.HasProp('_ChiralityPossible') else 0. - chirality_type = [0.] * 2 - if atom.HasProp('_CIPCode'): - chirality_type[['R', 'S'].index(atom.GetProp('_CIPCode'))] = 1. - - x = torch.tensor(symbol + degree + [formal_charge] + - [radical_electrons] + hybridization + - [aromaticity] + hydrogens + [chirality] + - chirality_type) - xs.append(x) - - data.x = torch.stack(xs, dim=0) - - edge_indices = [] - edge_attrs = [] - for bond in mol.GetBonds(): - edge_indices += [[bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()]] - edge_indices += [[bond.GetEndAtomIdx(), bond.GetBeginAtomIdx()]] - - bond_type = bond.GetBondType() - single = 1. if bond_type == Chem.rdchem.BondType.SINGLE else 0. - double = 1. if bond_type == Chem.rdchem.BondType.DOUBLE else 0. - triple = 1. if bond_type == Chem.rdchem.BondType.TRIPLE else 0. - aromatic = 1. if bond_type == Chem.rdchem.BondType.AROMATIC else 0. - conjugation = 1. if bond.GetIsConjugated() else 0. - ring = 1. if bond.IsInRing() else 0. - stereo = [0.] * 4 - stereo[self.stereos.index(bond.GetStereo())] = 1. 
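-            # The 6 bond flags (single/double/triple/aromatic/conjugated/ring)
-            # plus the 4 stereo flags form the 10-dimensional edge features
-            # expected by `AttentiveFP(edge_dim=10, ...)` below.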
- - edge_attr = torch.tensor( - [single, double, triple, aromatic, conjugation, ring] + stereo) - - edge_attrs += [edge_attr, edge_attr] - - if len(edge_attrs) == 0: - data.edge_index = torch.zeros((2, 0), dtype=torch.long) - data.edge_attr = torch.zeros((0, 10), dtype=torch.float) - else: - data.edge_index = torch.tensor(edge_indices).t().contiguous() - data.edge_attr = torch.stack(edge_attrs, dim=0) - - return data - - -path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'AFP_Mol') -dataset = MoleculeNet(path, name='ESOL', pre_transform=GenFeatures()).shuffle() - -N = len(dataset) // 10 -val_dataset = dataset[:N] -test_dataset = dataset[N:2 * N] -train_dataset = dataset[2 * N:] - -train_loader = DataLoader(train_dataset, batch_size=200, shuffle=True) -val_loader = DataLoader(val_dataset, batch_size=200) -test_loader = DataLoader(test_dataset, batch_size=200) - -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') -model = AttentiveFP(in_channels=39, hidden_channels=200, out_channels=1, - edge_dim=10, num_layers=2, num_timesteps=2, - dropout=0.2).to(device) - -optimizer = torch.optim.Adam(model.parameters(), lr=10**-2.5, - weight_decay=10**-5) - - -def train(): - total_loss = total_examples = 0 - for data in train_loader: - data = data.to(device) - optimizer.zero_grad() - out = model(data.x, data.edge_index, data.edge_attr, data.batch) - loss = F.mse_loss(out, data.y) - loss.backward() - optimizer.step() - total_loss += float(loss) * data.num_graphs - total_examples += data.num_graphs - return sqrt(total_loss / total_examples) - - -@torch.no_grad() -def test(loader): - mse = [] - for data in loader: - data = data.to(device) - out = model(data.x, data.edge_index, data.edge_attr, data.batch) - mse.append(F.mse_loss(out, data.y, reduction='none').cpu()) - return float(torch.cat(mse, dim=0).mean().sqrt()) - - -for epoch in range(1, 201): - train_rmse = train() - val_rmse = test(val_loader) - test_rmse = test(test_loader) - print(f'Epoch: {epoch:03d}, Loss: {train_rmse:.4f} Val: {val_rmse:.4f} ' - f'Test: {test_rmse:.4f}') diff --git a/pytorch_geometric-2.3.1/examples/compile/gcn.py b/pytorch_geometric-2.3.1/examples/compile/gcn.py deleted file mode 100644 index 453e51b..0000000 --- a/pytorch_geometric-2.3.1/examples/compile/gcn.py +++ /dev/null @@ -1,65 +0,0 @@ -import os.path as osp - -import torch -import torch.nn.functional as F - -import torch_geometric -import torch_geometric.transforms as T -from torch_geometric.datasets import Planetoid -from torch_geometric.nn import GCNConv - -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - -path = osp.join('data', 'Planetoid') -dataset = Planetoid(path, name='Cora', transform=T.NormalizeFeatures()) -data = dataset[0].to(device) - - -class GCN(torch.nn.Module): - def __init__(self, in_channels, out_channels): - super().__init__() - self.conv1 = GCNConv(in_channels, 16) - self.conv2 = GCNConv(16, out_channels) - - def forward(self, x, edge_index): - x = F.dropout(x, p=0.5, training=self.training) - x = self.conv1(x, edge_index).relu() - x = F.dropout(x, p=0.5, training=self.training) - x = self.conv2(x, edge_index) - return x - - -model = GCN(dataset.num_features, dataset.num_classes).to(device) - -# Compile the model into an optimized version: -model = torch_geometric.compile(model) - -optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4) - - -def train(): - model.train() - optimizer.zero_grad() - out = model(data.x, data.edge_index) - loss = 
F.cross_entropy(out[data.train_mask], data.y[data.train_mask]) - loss.backward() - optimizer.step() - return float(loss) - - -@torch.no_grad() -def test(): - model.eval() - pred = model(data.x, data.edge_index).argmax(dim=-1) - - accs = [] - for mask in [data.train_mask, data.val_mask, data.test_mask]: - accs.append(int((pred[mask] == data.y[mask]).sum()) / int(mask.sum())) - return accs - - -for epoch in range(1, 201): - loss = train() - train_acc, val_acc, test_acc = test() - print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}, Train: {train_acc:.4f}, ' - f'Val: {val_acc:.4f}, Test: {test_acc:.4f}') diff --git a/pytorch_geometric-2.3.1/examples/compile/gin.py b/pytorch_geometric-2.3.1/examples/compile/gin.py deleted file mode 100644 index 46d9b67..0000000 --- a/pytorch_geometric-2.3.1/examples/compile/gin.py +++ /dev/null @@ -1,88 +0,0 @@ -import os.path as osp - -import torch -import torch.nn.functional as F - -import torch_geometric -import torch_geometric.transforms as T -from torch_geometric.datasets import TUDataset -from torch_geometric.loader import DataLoader -from torch_geometric.nn import MLP, GINConv, global_add_pool - -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - -path = osp.join('data', 'TUDatasets') -transform = T.Pad(max_num_nodes=28, max_num_edges=66) -dataset = TUDataset(path, name='MUTAG', pre_transform=transform).shuffle() - -train_dataset = dataset[len(dataset) // 10:] -train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True, - drop_last=True) - -test_dataset = dataset[:len(dataset) // 10] -test_loader = DataLoader(test_dataset, batch_size=128) - - -class GIN(torch.nn.Module): - def __init__(self, in_channels, out_channels): - super().__init__() - - self.convs = torch.nn.ModuleList() - for _ in range(5): - mlp = MLP([in_channels, 32, 32]) - self.convs.append(GINConv(mlp, train_eps=False)) - in_channels = 32 - - self.mlp = MLP([32, 32, out_channels], norm=None, dropout=0.5) - - def forward(self, x, edge_index, batch): - for conv in self.convs: - x = conv(x, edge_index).relu() - x = global_add_pool(x, batch) - return self.mlp(x) - - -model = GIN(dataset.num_features, dataset.num_classes).to(device) - -# Compile the model into an optimized version: -# Note that `compile(model, dynamic=True)` does not work yet in PyTorch 2.0, so -# we use `transforms.Pad` and static compilation as a current workaround. 
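-# Padding every graph to `max_num_nodes=28` and `max_num_edges=66` (see the
-# `T.Pad` transform above) keeps all input shapes identical, so the statically
-# compiled kernels can be reused across batches instead of re-compiling.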
-# See: https://github.com/pytorch/pytorch/issues/94640 -model = torch_geometric.compile(model) - -optimizer = torch.optim.Adam(model.parameters(), lr=0.01) - - -def train(): - model.train() - - total_loss = 0 - for data in train_loader: - data = data.to(device) - optimizer.zero_grad() - out = model(data.x, data.edge_index, data.batch) - loss = F.cross_entropy(out, data.y) - loss.backward() - optimizer.step() - total_loss += float(loss) * data.num_graphs - return total_loss / len(train_loader.dataset) - - -@torch.no_grad() -def test(loader): - model.eval() - - total_correct = 0 - for data in loader: - data = data.to(device) - pred = model(data.x, data.edge_index, data.batch).argmax(dim=-1) - total_correct += int((pred == data.y).sum()) - return total_correct / len(loader.dataset) - - -for epoch in range(1, 101): - loss = train() - train_acc = test(train_loader) - test_acc = test(test_loader) - print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}, Train: {train_acc:.4f}, ' - f'Test: {test_acc:.4f}') diff --git a/pytorch_geometric-2.3.1/examples/contrib/graphmask_explainer.py b/pytorch_geometric-2.3.1/examples/contrib/graphmask_explainer.py deleted file mode 100644 index 2a007d1..0000000 --- a/pytorch_geometric-2.3.1/examples/contrib/graphmask_explainer.py +++ /dev/null @@ -1,103 +0,0 @@ -import os.path as osp - -import torch -import torch.nn.functional as F - -from torch_geometric.contrib.explain import GraphMaskExplainer -from torch_geometric.datasets import Planetoid -from torch_geometric.explain import Explainer -from torch_geometric.nn import GATConv, GCNConv - -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - -path = osp.join(osp.dirname(osp.realpath(__file__)), 'data', 'Planetoid') -dataset = Planetoid(path, name='Cora') -data = dataset[0].to(device) - -# GCN Node Classification ===================================================== - - -class GCN(torch.nn.Module): - def __init__(self): - super().__init__() - self.conv1 = GCNConv(dataset.num_features, 16) - self.conv2 = GCNConv(16, dataset.num_classes) - - def forward(self, x, edge_index): - x = self.conv1(x, edge_index).relu() - x = F.dropout(x, training=self.training) - x = self.conv2(x, edge_index) - return F.log_softmax(x, dim=1) - - -model = GCN().to(device) -optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4) - -for epoch in range(1, 201): - model.train() - optimizer.zero_grad() - out = model(data.x, data.edge_index) - loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask]) - loss.backward() - optimizer.step() - -explainer = Explainer( - model=model, - algorithm=GraphMaskExplainer(2, epochs=5, layer_type='GCN'), - explanation_type='model', - node_mask_type='attributes', - edge_mask_type='object', - model_config=dict( - mode='multiclass_classification', - task_level='node', - return_type='log_probs', - ), -) - -node_index = 10 -explanation = explainer(data.x, data.edge_index, index=node_index) -print(f'Generated explanations in {explanation.available_explanations}') - -# GAT Node Classification ===================================================== - - -class GAT(torch.nn.Module): - def __init__(self): - super().__init__() - self.conv1 = GATConv(dataset.num_features, 8, heads=8) - self.conv2 = GATConv(64, dataset.num_classes, heads=1, concat=False) - - def forward(self, x, edge_index): - x = self.conv1(x, edge_index).relu() - x = F.dropout(x, training=self.training) - x = self.conv2(x, edge_index) - return F.log_softmax(x, dim=1) - - -model = GAT().to(device) -optimizer = 
torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4) - -for epoch in range(1, 201): - model.train() - optimizer.zero_grad() - out = model(data.x, data.edge_index) - loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask]) - loss.backward() - optimizer.step() - -explainer = Explainer( - model=model, - algorithm=GraphMaskExplainer(2, epochs=5, layer_type='GAT'), - explanation_type='model', - node_mask_type='attributes', - edge_mask_type='object', - model_config=dict( - mode='multiclass_classification', - task_level='node', - return_type='log_probs', - ), -) - -node_index = torch.tensor([10, 20]) -explanation = explainer(data.x, data.edge_index, index=node_index) -print(f'Generated explanations in {explanation.available_explanations}') diff --git a/pytorch_geometric-2.3.1/examples/gat.py b/pytorch_geometric-2.3.1/examples/gat.py deleted file mode 100644 index 49769f0..0000000 --- a/pytorch_geometric-2.3.1/examples/gat.py +++ /dev/null @@ -1,79 +0,0 @@ -import argparse -import os.path as osp - -import torch -import torch.nn.functional as F - -import torch_geometric.transforms as T -from torch_geometric.datasets import Planetoid -from torch_geometric.logging import init_wandb, log -from torch_geometric.nn import GATConv - -parser = argparse.ArgumentParser() -parser.add_argument('--dataset', type=str, default='Cora') -parser.add_argument('--hidden_channels', type=int, default=8) -parser.add_argument('--heads', type=int, default=8) -parser.add_argument('--lr', type=float, default=0.005) -parser.add_argument('--epochs', type=int, default=200) -parser.add_argument('--wandb', action='store_true', help='Track experiment') -args = parser.parse_args() - -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') -init_wandb(name=f'GAT-{args.dataset}', heads=args.heads, epochs=args.epochs, - hidden_channels=args.hidden_channels, lr=args.lr, device=device) - -path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'Planetoid') -dataset = Planetoid(path, args.dataset, transform=T.NormalizeFeatures()) -data = dataset[0].to(device) - - -class GAT(torch.nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels, heads): - super().__init__() - self.conv1 = GATConv(in_channels, hidden_channels, heads, dropout=0.6) - # On the Pubmed dataset, use `heads` output heads in `conv2`. 
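-        # With `heads=1` and `concat=False`, the final layer keeps its output
-        # at `out_channels` dimensions instead of concatenating head outputs.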
- self.conv2 = GATConv(hidden_channels * heads, out_channels, heads=1, - concat=False, dropout=0.6) - - def forward(self, x, edge_index): - x = F.dropout(x, p=0.6, training=self.training) - x = F.elu(self.conv1(x, edge_index)) - x = F.dropout(x, p=0.6, training=self.training) - x = self.conv2(x, edge_index) - return x - - -model = GAT(dataset.num_features, args.hidden_channels, dataset.num_classes, - args.heads).to(device) -optimizer = torch.optim.Adam(model.parameters(), lr=0.005, weight_decay=5e-4) - - -def train(): - model.train() - optimizer.zero_grad() - out = model(data.x, data.edge_index) - loss = F.cross_entropy(out[data.train_mask], data.y[data.train_mask]) - loss.backward() - optimizer.step() - return float(loss) - - -@torch.no_grad() -def test(): - model.eval() - pred = model(data.x, data.edge_index).argmax(dim=-1) - - accs = [] - for mask in [data.train_mask, data.val_mask, data.test_mask]: - accs.append(int((pred[mask] == data.y[mask]).sum()) / int(mask.sum())) - return accs - - -best_val_acc = final_test_acc = 0 -for epoch in range(1, args.epochs + 1): - loss = train() - train_acc, val_acc, tmp_test_acc = test() - if val_acc > best_val_acc: - best_val_acc = val_acc - test_acc = tmp_test_acc - log(Epoch=epoch, Loss=loss, Train=train_acc, Val=val_acc, Test=test_acc) diff --git a/pytorch_geometric-2.3.1/examples/gcn.py b/pytorch_geometric-2.3.1/examples/gcn.py deleted file mode 100644 index ac68e82..0000000 --- a/pytorch_geometric-2.3.1/examples/gcn.py +++ /dev/null @@ -1,93 +0,0 @@ -import argparse -import os.path as osp - -import torch -import torch.nn.functional as F - -import torch_geometric.transforms as T -from torch_geometric.datasets import Planetoid -from torch_geometric.logging import init_wandb, log -from torch_geometric.nn import GCNConv - -parser = argparse.ArgumentParser() -parser.add_argument('--dataset', type=str, default='Cora') -parser.add_argument('--hidden_channels', type=int, default=16) -parser.add_argument('--lr', type=float, default=0.01) -parser.add_argument('--epochs', type=int, default=200) -parser.add_argument('--use_gdc', action='store_true', help='Use GDC') -parser.add_argument('--wandb', action='store_true', help='Track experiment') -args = parser.parse_args() - -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') -init_wandb(name=f'GCN-{args.dataset}', lr=args.lr, epochs=args.epochs, - hidden_channels=args.hidden_channels, device=device) - -path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'Planetoid') -dataset = Planetoid(path, args.dataset, transform=T.NormalizeFeatures()) -data = dataset[0] - -if args.use_gdc: - transform = T.GDC( - self_loop_weight=1, - normalization_in='sym', - normalization_out='col', - diffusion_kwargs=dict(method='ppr', alpha=0.05), - sparsification_kwargs=dict(method='topk', k=128, dim=0), - exact=True, - ) - data = transform(data) - - -class GCN(torch.nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels): - super().__init__() - self.conv1 = GCNConv(in_channels, hidden_channels, cached=True, - normalize=not args.use_gdc) - self.conv2 = GCNConv(hidden_channels, out_channels, cached=True, - normalize=not args.use_gdc) - - def forward(self, x, edge_index, edge_weight=None): - x = F.dropout(x, p=0.5, training=self.training) - x = self.conv1(x, edge_index, edge_weight).relu() - x = F.dropout(x, p=0.5, training=self.training) - x = self.conv2(x, edge_index, edge_weight) - return x - - -model = GCN(dataset.num_features, args.hidden_channels, dataset.num_classes) -model, 
data = model.to(device), data.to(device) -optimizer = torch.optim.Adam([ - dict(params=model.conv1.parameters(), weight_decay=5e-4), - dict(params=model.conv2.parameters(), weight_decay=0) -], lr=args.lr) # Only perform weight-decay on first convolution. - - -def train(): - model.train() - optimizer.zero_grad() - out = model(data.x, data.edge_index, data.edge_attr) - loss = F.cross_entropy(out[data.train_mask], data.y[data.train_mask]) - loss.backward() - optimizer.step() - return float(loss) - - -@torch.no_grad() -def test(): - model.eval() - pred = model(data.x, data.edge_index, data.edge_attr).argmax(dim=-1) - - accs = [] - for mask in [data.train_mask, data.val_mask, data.test_mask]: - accs.append(int((pred[mask] == data.y[mask]).sum()) / int(mask.sum())) - return accs - - -best_val_acc = final_test_acc = 0 -for epoch in range(1, args.epochs + 1): - loss = train() - train_acc, val_acc, tmp_test_acc = test() - if val_acc > best_val_acc: - best_val_acc = val_acc - test_acc = tmp_test_acc - log(Epoch=epoch, Loss=loss, Train=train_acc, Val=val_acc, Test=test_acc) diff --git a/pytorch_geometric-2.3.1/examples/graph_gps.py b/pytorch_geometric-2.3.1/examples/graph_gps.py deleted file mode 100644 index 161148e..0000000 --- a/pytorch_geometric-2.3.1/examples/graph_gps.py +++ /dev/null @@ -1,91 +0,0 @@ -import os.path as osp - -import torch -from torch.nn import Embedding, Linear, ModuleList, ReLU, Sequential - -import torch_geometric.transforms as T -from torch_geometric.datasets import ZINC -from torch_geometric.loader import DataLoader -from torch_geometric.nn import GINEConv, GPSConv, global_add_pool - -path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'ZINC-PE') -transform = T.AddRandomWalkPE(walk_length=20, attr_name='pe') -train_dataset = ZINC(path, subset=True, split='train', pre_transform=transform) -val_dataset = ZINC(path, subset=True, split='val', pre_transform=transform) -test_dataset = ZINC(path, subset=True, split='test', pre_transform=transform) - -train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True) -val_loader = DataLoader(val_dataset, batch_size=64) -test_loader = DataLoader(test_dataset, batch_size=64) - - -class GPS(torch.nn.Module): - def __init__(self, channels: int, num_layers: int): - super().__init__() - - self.node_emb = Embedding(21, channels) - self.pe_lin = Linear(20, channels) - self.edge_emb = Embedding(4, channels) - - self.convs = ModuleList() - for _ in range(num_layers): - nn = Sequential( - Linear(channels, channels), - ReLU(), - Linear(channels, channels), - ) - conv = GPSConv(channels, GINEConv(nn), heads=4, attn_dropout=0.5) - self.convs.append(conv) - - self.lin = Linear(channels, 1) - - def forward(self, x, pe, edge_index, edge_attr, batch): - x = self.node_emb(x.squeeze(-1)) + self.pe_lin(pe) - edge_attr = self.edge_emb(edge_attr) - - for conv in self.convs: - x = conv(x, edge_index, batch, edge_attr=edge_attr) - x = global_add_pool(x, batch) - return self.lin(x) - - -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') -model = GPS(channels=64, num_layers=10).to(device) -optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=1e-5) - - -def train(epoch): - model.train() - - total_loss = 0 - for data in train_loader: - data = data.to(device) - optimizer.zero_grad() - out = model(data.x, data.pe, data.edge_index, data.edge_attr, - data.batch) - loss = (out.squeeze() - data.y).abs().mean() - loss.backward() - total_loss += loss.item() * data.num_graphs - optimizer.step() - return 
total_loss / len(train_loader.dataset) - - -@torch.no_grad() -def test(loader): - model.eval() - - total_error = 0 - for data in loader: - data = data.to(device) - out = model(data.x, data.pe, data.edge_index, data.edge_attr, - data.batch) - total_error += (out.squeeze() - data.y).abs().sum().item() - return total_error / len(loader.dataset) - - -for epoch in range(1, 101): - loss = train(epoch) - val_mae = test(val_loader) - test_mae = test(test_loader) - print(f'Epoch: {epoch:02d}, Loss: {loss:.4f}, Val: {val_mae:.4f}, ' - f'Test: {test_mae:.4f}') diff --git a/pytorch_geometric-2.3.1/examples/infomax_inductive.py b/pytorch_geometric-2.3.1/examples/infomax_inductive.py deleted file mode 100644 index 74a5354..0000000 --- a/pytorch_geometric-2.3.1/examples/infomax_inductive.py +++ /dev/null @@ -1,104 +0,0 @@ -import os.path as osp - -import torch -import torch.nn as nn -from tqdm import tqdm - -from torch_geometric.datasets import Reddit -from torch_geometric.loader import NeighborSampler -from torch_geometric.nn import DeepGraphInfomax, SAGEConv - -path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'Reddit') -dataset = Reddit(path) -data = dataset[0] - -train_loader = NeighborSampler(data.edge_index, node_idx=None, - sizes=[10, 10, 25], batch_size=256, - shuffle=True, num_workers=12) - -test_loader = NeighborSampler(data.edge_index, node_idx=None, - sizes=[10, 10, 25], batch_size=256, - shuffle=False, num_workers=12) - - -class Encoder(nn.Module): - def __init__(self, in_channels, hidden_channels): - super().__init__() - self.convs = torch.nn.ModuleList([ - SAGEConv(in_channels, hidden_channels), - SAGEConv(hidden_channels, hidden_channels), - SAGEConv(hidden_channels, hidden_channels) - ]) - - self.activations = torch.nn.ModuleList() - self.activations.extend([ - nn.PReLU(hidden_channels), - nn.PReLU(hidden_channels), - nn.PReLU(hidden_channels) - ]) - - def forward(self, x, adjs): - for i, (edge_index, _, size) in enumerate(adjs): - x_target = x[:size[1]] # Target nodes are always placed first. - x = self.convs[i]((x, x_target), edge_index) - x = self.activations[i](x) - return x - - -def corruption(x, edge_index): - return x[torch.randperm(x.size(0))], edge_index - - -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') -model = DeepGraphInfomax( - hidden_channels=512, encoder=Encoder(dataset.num_features, 512), - summary=lambda z, *args, **kwargs: torch.sigmoid(z.mean(dim=0)), - corruption=corruption).to(device) - -model = model.to(device) -optimizer = torch.optim.Adam(model.parameters(), lr=0.0001) - -x, y = data.x.to(device), data.y.to(device) - - -def train(epoch): - model.train() - - total_loss = total_examples = 0 - for batch_size, n_id, adjs in tqdm(train_loader, - desc=f'Epoch {epoch:02d}'): - # `adjs` holds a list of `(edge_index, e_id, size)` tuples. 
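-        # Each `size` is `(num_source_nodes, num_target_nodes)`; the first
-        # `size[1]` rows of `x` correspond to the target nodes of that hop
-        # (see `Encoder.forward` above).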
- adjs = [adj.to(device) for adj in adjs] - - optimizer.zero_grad() - pos_z, neg_z, summary = model(x[n_id], adjs) - loss = model.loss(pos_z, neg_z, summary) - loss.backward() - optimizer.step() - total_loss += float(loss) * pos_z.size(0) - total_examples += pos_z.size(0) - - return total_loss / total_examples - - -@torch.no_grad() -def test(): - model.eval() - - zs = [] - for i, (batch_size, n_id, adjs) in enumerate(test_loader): - adjs = [adj.to(device) for adj in adjs] - zs.append(model(x[n_id], adjs)[0]) - z = torch.cat(zs, dim=0) - train_val_mask = data.train_mask | data.val_mask - acc = model.test(z[train_val_mask], y[train_val_mask], z[data.test_mask], - y[data.test_mask], max_iter=10000) - return acc - - -for epoch in range(1, 31): - loss = train(epoch) - print(f'Epoch {epoch:02d}, Loss: {loss:.4f}') - -test_acc = test() -print(f'Test Accuracy: {test_acc:.4f}') diff --git a/pytorch_geometric-2.3.1/examples/infomax_transductive.py b/pytorch_geometric-2.3.1/examples/infomax_transductive.py deleted file mode 100644 index e268d51..0000000 --- a/pytorch_geometric-2.3.1/examples/infomax_transductive.py +++ /dev/null @@ -1,61 +0,0 @@ -import os.path as osp - -import torch -import torch.nn as nn - -from torch_geometric.datasets import Planetoid -from torch_geometric.nn import DeepGraphInfomax, GCNConv - -dataset = 'Cora' -path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', dataset) -dataset = Planetoid(path, dataset) - - -class Encoder(nn.Module): - def __init__(self, in_channels, hidden_channels): - super().__init__() - self.conv = GCNConv(in_channels, hidden_channels, cached=True) - self.prelu = nn.PReLU(hidden_channels) - - def forward(self, x, edge_index): - x = self.conv(x, edge_index) - x = self.prelu(x) - return x - - -def corruption(x, edge_index): - return x[torch.randperm(x.size(0))], edge_index - - -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') -model = DeepGraphInfomax( - hidden_channels=512, encoder=Encoder(dataset.num_features, 512), - summary=lambda z, *args, **kwargs: torch.sigmoid(z.mean(dim=0)), - corruption=corruption).to(device) -data = dataset[0].to(device) -optimizer = torch.optim.Adam(model.parameters(), lr=0.001) - - -def train(): - model.train() - optimizer.zero_grad() - pos_z, neg_z, summary = model(data.x, data.edge_index) - loss = model.loss(pos_z, neg_z, summary) - loss.backward() - optimizer.step() - return loss.item() - - -def test(): - model.eval() - z, _, _ = model(data.x, data.edge_index) - acc = model.test(z[data.train_mask], data.y[data.train_mask], - z[data.test_mask], data.y[data.test_mask], max_iter=150) - return acc - - -for epoch in range(1, 301): - loss = train() - print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}') -acc = test() -print(f'Accuracy: {acc:.4f}') diff --git a/pytorch_geometric-2.3.1/examples/linkx.py b/pytorch_geometric-2.3.1/examples/linkx.py deleted file mode 100644 index 169eb28..0000000 --- a/pytorch_geometric-2.3.1/examples/linkx.py +++ /dev/null @@ -1,47 +0,0 @@ -import os.path as osp - -import torch -import torch.nn.functional as F - -from torch_geometric.datasets import LINKXDataset -from torch_geometric.nn import LINKX - -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - -path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'LINKX') -dataset = LINKXDataset(path, name='Penn94') -data = dataset[0].to(device) - -model = LINKX(data.num_nodes, data.num_features, hidden_channels=32, - out_channels=dataset.num_classes, num_layers=1, - num_edge_layers=1, 
num_node_layers=1, dropout=0.5).to(device) -optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=1e-3) - - -def train(): - model.train() - optimizer.zero_grad() - out = model(data.x, data.edge_index) - mask = data.train_mask[:, 0] # Use the first set of the five masks. - loss = F.cross_entropy(out[mask], data.y[mask]) - loss.backward() - optimizer.step() - return float(loss) - - -@torch.no_grad() -def test(): - accs = [] - model.eval() - pred = model(data.x, data.edge_index).argmax(dim=-1) - for _, mask in data('train_mask', 'val_mask', 'test_mask'): - mask = mask[:, 0] # Use the first set of the five masks. - accs.append(int((pred[mask] == data.y[mask]).sum()) / int(mask.sum())) - return accs - - -for epoch in range(1, 201): - loss = train() - train_acc, val_acc, test_acc = test() - print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}, Train: {train_acc:.4f}, ' - f'Val: {val_acc:.4f}, Test: {test_acc:.4f}') diff --git a/pytorch_geometric-2.3.1/examples/mem_pool.py b/pytorch_geometric-2.3.1/examples/mem_pool.py deleted file mode 100644 index 7606306..0000000 --- a/pytorch_geometric-2.3.1/examples/mem_pool.py +++ /dev/null @@ -1,117 +0,0 @@ -import os.path as osp - -import torch -import torch.nn.functional as F -from torch.nn import BatchNorm1d, LeakyReLU, Linear - -from torch_geometric.datasets import TUDataset -from torch_geometric.loader import DataLoader -from torch_geometric.nn import DeepGCNLayer, GATConv, MemPooling - -path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'TUD') -dataset = TUDataset(path, name="PROTEINS_full", use_node_attr=True) -dataset.data.x = dataset.data.x[:, :-3] # only use non-binary features. -dataset = dataset.shuffle() - -n = (len(dataset)) // 10 -test_dataset = dataset[:n] -val_dataset = dataset[n:2 * n] -train_dataset = dataset[2 * n:] - -test_loader = DataLoader(test_dataset, batch_size=20) -val_loader = DataLoader(val_dataset, batch_size=20) -train_loader = DataLoader(train_dataset, batch_size=20) - - -class Net(torch.nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels, dropout): - super().__init__() - self.dropout = dropout - - self.lin = Linear(in_channels, hidden_channels) - - self.convs = torch.nn.ModuleList() - for i in range(2): - conv = GATConv(hidden_channels, hidden_channels, dropout=dropout) - norm = BatchNorm1d(hidden_channels) - act = LeakyReLU() - self.convs.append( - DeepGCNLayer(conv, norm, act, block='res+', dropout=dropout)) - - self.mem1 = MemPooling(hidden_channels, 80, heads=5, num_clusters=10) - self.mem2 = MemPooling(80, out_channels, heads=5, num_clusters=1) - - def forward(self, x, edge_index, batch): - x = self.lin(x) - for conv in self.convs: - x = conv(x, edge_index) - - x, S1 = self.mem1(x, batch) - x = F.leaky_relu(x) - x = F.dropout(x, p=self.dropout) - x, S2 = self.mem2(x) - - return ( - F.log_softmax(x.squeeze(1), dim=-1), - MemPooling.kl_loss(S1) + MemPooling.kl_loss(S2), - ) - - -device = 'cuda' if torch.cuda.is_available() else 'cpu' -model = Net(dataset.num_features, 32, dataset.num_classes, dropout=0.1) -model = model.to(device) -optimizer = torch.optim.Adam(model.parameters(), lr=1e-3, weight_decay=4e-5) - - -def train(): - model.train() - - model.mem1.k.requires_grad = False - model.mem2.k.requires_grad = False - for data in train_loader: - optimizer.zero_grad() - data = data.to(device) - out = model(data.x, data.edge_index, data.batch)[0] - loss = F.nll_loss(out, data.y) - loss.backward() - optimizer.step() - - kl_loss = 0. 
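-    # Second pass: re-enable gradients for the cluster keys and minimize the
-    # KL divergence of the soft assignments S1 and S2, accumulated over the
-    # full training set.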
- model.mem1.k.requires_grad = True - model.mem2.k.requires_grad = True - optimizer.zero_grad() - for data in train_loader: - data = data.to(device) - kl_loss += model(data.x, data.edge_index, data.batch)[1] - kl_loss /= len(train_loader.dataset) - kl_loss.backward() - optimizer.step() - - -@torch.no_grad() -def test(loader): - model.eval() - total_correct = 0 - for data in loader: - data = data.to(device) - out = model(data.x, data.edge_index, data.batch)[0] - total_correct += int((out.argmax(dim=-1) == data.y).sum()) - return total_correct / len(loader.dataset) - - -patience = start_patience = 250 -test_acc = best_val_acc = 0. -for epoch in range(1, 2001): - train() - val_acc = test(val_loader) - if epoch % 500 == 0: - optimizer.param_groups[0]['lr'] *= 0.5 - if best_val_acc < val_acc: - patience = start_patience - best_val_acc = val_acc - test_acc = test(test_loader) - else: - patience -= 1 - print(f'Epoch {epoch:02d}, Val: {val_acc:.3f}, Test: {test_acc:.3f}') - if patience <= 0: - break diff --git a/pytorch_geometric-2.3.1/examples/multi_gpu/data_parallel.py b/pytorch_geometric-2.3.1/examples/multi_gpu/data_parallel.py deleted file mode 100644 index 42eb40f..0000000 --- a/pytorch_geometric-2.3.1/examples/multi_gpu/data_parallel.py +++ /dev/null @@ -1,50 +0,0 @@ -import os.path as osp - -import torch -import torch.nn.functional as F - -import torch_geometric.transforms as T -from torch_geometric.datasets import MNISTSuperpixels -from torch_geometric.loader import DataListLoader -from torch_geometric.nn import DataParallel, SplineConv, global_mean_pool - -path = osp.join(osp.dirname(osp.realpath(__file__)), '../../data', 'MNIST') -dataset = MNISTSuperpixels(path, transform=T.Cartesian()).shuffle() -loader = DataListLoader(dataset, batch_size=1024, shuffle=True) - - -class Net(torch.nn.Module): - def __init__(self): - super().__init__() - self.conv1 = SplineConv(dataset.num_features, 32, dim=2, kernel_size=5) - self.conv2 = SplineConv(32, 64, dim=2, kernel_size=5) - self.lin1 = torch.nn.Linear(64, 128) - self.lin2 = torch.nn.Linear(128, dataset.num_classes) - - def forward(self, data): - print(f'Inside model - num graphs: {data.num_graphs}, ' - f'device: {data.batch.device}') - - x, edge_index, edge_attr = data.x, data.edge_index, data.edge_attr - x = F.elu(self.conv1(x, edge_index, edge_attr)) - x = F.elu(self.conv2(x, edge_index, edge_attr)) - x = global_mean_pool(x, data.batch) - x = F.elu(self.lin1(x)) - return F.log_softmax(self.lin2(x), dim=1) - - -model = Net() -print(f"Let's use {torch.cuda.device_count()} GPUs!") -model = DataParallel(model) -device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') -model = model.to(device) -optimizer = torch.optim.Adam(model.parameters(), lr=0.01) - -for data_list in loader: - optimizer.zero_grad() - output = model(data_list) - print(f'Outside model - num graphs: {output.size(0)}') - y = torch.cat([data.y for data in data_list]).to(output.device) - loss = F.nll_loss(output, y) - loss.backward() - optimizer.step() diff --git a/pytorch_geometric-2.3.1/examples/mutag_gin.py b/pytorch_geometric-2.3.1/examples/mutag_gin.py deleted file mode 100644 index 4f0d1f1..0000000 --- a/pytorch_geometric-2.3.1/examples/mutag_gin.py +++ /dev/null @@ -1,93 +0,0 @@ -import argparse -import os.path as osp - -import torch -import torch.nn.functional as F - -from torch_geometric.datasets import TUDataset -from torch_geometric.loader import DataLoader -from torch_geometric.logging import init_wandb, log -from torch_geometric.nn import MLP, GINConv, 
global_add_pool - -parser = argparse.ArgumentParser() -parser.add_argument('--dataset', type=str, default='MUTAG') -parser.add_argument('--batch_size', type=int, default=128) -parser.add_argument('--hidden_channels', type=int, default=32) -parser.add_argument('--num_layers', type=int, default=5) -parser.add_argument('--lr', type=float, default=0.01) -parser.add_argument('--epochs', type=int, default=100) -parser.add_argument('--wandb', action='store_true', help='Track experiment') -args = parser.parse_args() - -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') -init_wandb(name=f'GIN-{args.dataset}', batch_size=args.batch_size, lr=args.lr, - epochs=args.epochs, hidden_channels=args.hidden_channels, - num_layers=args.num_layers, device=device) - -path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'TU') -dataset = TUDataset(path, name=args.dataset).shuffle() - -train_dataset = dataset[len(dataset) // 10:] -train_loader = DataLoader(train_dataset, args.batch_size, shuffle=True) - -test_dataset = dataset[:len(dataset) // 10] -test_loader = DataLoader(test_dataset, args.batch_size) - - -class Net(torch.nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels, num_layers): - super().__init__() - - self.convs = torch.nn.ModuleList() - for _ in range(num_layers): - mlp = MLP([in_channels, hidden_channels, hidden_channels]) - self.convs.append(GINConv(nn=mlp, train_eps=False)) - in_channels = hidden_channels - - self.mlp = MLP([hidden_channels, hidden_channels, out_channels], - norm=None, dropout=0.5) - - def forward(self, x, edge_index, batch): - for conv in self.convs: - x = conv(x, edge_index).relu() - x = global_add_pool(x, batch) - return self.mlp(x) - - -model = Net(dataset.num_features, args.hidden_channels, dataset.num_classes, - args.num_layers).to(device) -optimizer = torch.optim.Adam(model.parameters(), lr=args.lr) - - -def train(): - model.train() - - total_loss = 0 - for data in train_loader: - data = data.to(device) - optimizer.zero_grad() - out = model(data.x, data.edge_index, data.batch) - loss = F.cross_entropy(out, data.y) - loss.backward() - optimizer.step() - total_loss += float(loss) * data.num_graphs - return total_loss / len(train_loader.dataset) - - -@torch.no_grad() -def test(loader): - model.eval() - - total_correct = 0 - for data in loader: - data = data.to(device) - pred = model(data.x, data.edge_index, data.batch).argmax(dim=-1) - total_correct += int((pred == data.y).sum()) - return total_correct / len(loader.dataset) - - -for epoch in range(1, args.epochs + 1): - loss = train() - train_acc = test(train_loader) - test_acc = test(test_loader) - log(Epoch=epoch, Loss=loss, Train=train_acc, Test=test_acc) diff --git a/pytorch_geometric-2.3.1/examples/node2vec.py b/pytorch_geometric-2.3.1/examples/node2vec.py deleted file mode 100644 index 641cdc7..0000000 --- a/pytorch_geometric-2.3.1/examples/node2vec.py +++ /dev/null @@ -1,74 +0,0 @@ -import os.path as osp -import sys - -import matplotlib.pyplot as plt -import torch -from sklearn.manifold import TSNE - -from torch_geometric.datasets import Planetoid -from torch_geometric.nn import Node2Vec - - -def main(): - dataset = 'Cora' - path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', dataset) - dataset = Planetoid(path, dataset) - data = dataset[0] - - device = 'cuda' if torch.cuda.is_available() else 'cpu' - model = Node2Vec(data.edge_index, embedding_dim=128, walk_length=20, - context_size=10, walks_per_node=10, - num_negative_samples=1, p=1, q=1, 
sparse=True).to(device) - - num_workers = 0 if sys.platform.startswith('win') else 4 - loader = model.loader(batch_size=128, shuffle=True, - num_workers=num_workers) - optimizer = torch.optim.SparseAdam(list(model.parameters()), lr=0.01) - - def train(): - model.train() - total_loss = 0 - for pos_rw, neg_rw in loader: - optimizer.zero_grad() - loss = model.loss(pos_rw.to(device), neg_rw.to(device)) - loss.backward() - optimizer.step() - total_loss += loss.item() - return total_loss / len(loader) - - @torch.no_grad() - def test(): - model.eval() - z = model() - acc = model.test(z[data.train_mask], data.y[data.train_mask], - z[data.test_mask], data.y[data.test_mask], - max_iter=150) - return acc - - for epoch in range(1, 101): - loss = train() - acc = test() - print(f'Epoch: {epoch:02d}, Loss: {loss:.4f}, Acc: {acc:.4f}') - - @torch.no_grad() - def plot_points(colors): - model.eval() - z = model(torch.arange(data.num_nodes, device=device)) - z = TSNE(n_components=2).fit_transform(z.cpu().numpy()) - y = data.y.cpu().numpy() - - plt.figure(figsize=(8, 8)) - for i in range(dataset.num_classes): - plt.scatter(z[y == i, 0], z[y == i, 1], s=20, color=colors[i]) - plt.axis('off') - plt.show() - - colors = [ - '#ffc0cb', '#bada55', '#008080', '#420420', '#7fe5f0', '#065535', - '#ffd700' - ] - plot_points(colors) - - -if __name__ == "__main__": - main() diff --git a/pytorch_geometric-2.3.1/examples/ogbn_products_gat.py b/pytorch_geometric-2.3.1/examples/ogbn_products_gat.py deleted file mode 100644 index 4688e8b..0000000 --- a/pytorch_geometric-2.3.1/examples/ogbn_products_gat.py +++ /dev/null @@ -1,195 +0,0 @@ -# Reaches around 0.7945 ± 0.0059 test accuracy. - -import os.path as osp - -import torch -import torch.nn.functional as F -from ogb.nodeproppred import Evaluator, PygNodePropPredDataset -from torch.nn import Linear as Lin -from tqdm import tqdm - -from torch_geometric.loader import NeighborSampler -from torch_geometric.nn import GATConv - -root = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'products') -dataset = PygNodePropPredDataset('ogbn-products', root) -split_idx = dataset.get_idx_split() -evaluator = Evaluator(name='ogbn-products') -data = dataset[0] - -train_idx = split_idx['train'] -train_loader = NeighborSampler(data.edge_index, node_idx=train_idx, - sizes=[10, 10, 10], batch_size=512, - shuffle=True, num_workers=12) -subgraph_loader = NeighborSampler(data.edge_index, node_idx=None, sizes=[-1], - batch_size=1024, shuffle=False, - num_workers=12) - - -class GAT(torch.nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels, num_layers, - heads): - super().__init__() - - self.num_layers = num_layers - - self.convs = torch.nn.ModuleList() - self.convs.append(GATConv(dataset.num_features, hidden_channels, - heads)) - for _ in range(num_layers - 2): - self.convs.append( - GATConv(heads * hidden_channels, hidden_channels, heads)) - self.convs.append( - GATConv(heads * hidden_channels, out_channels, heads, - concat=False)) - - self.skips = torch.nn.ModuleList() - self.skips.append(Lin(dataset.num_features, hidden_channels * heads)) - for _ in range(num_layers - 2): - self.skips.append( - Lin(hidden_channels * heads, hidden_channels * heads)) - self.skips.append(Lin(hidden_channels * heads, out_channels)) - - def reset_parameters(self): - for conv in self.convs: - conv.reset_parameters() - for skip in self.skips: - skip.reset_parameters() - - def forward(self, x, adjs): - # `train_loader` computes the k-hop neighborhood of a batch of nodes, - # and returns, 
for each layer, a bipartite graph object, holding the - # bipartite edges `edge_index`, the index `e_id` of the original edges, - # and the size/shape `size` of the bipartite graph. - # Target nodes are also included in the source nodes so that one can - # easily apply skip-connections or add self-loops. - for i, (edge_index, _, size) in enumerate(adjs): - x_target = x[:size[1]] # Target nodes are always placed first. - x = self.convs[i]((x, x_target), edge_index) - x = x + self.skips[i](x_target) - if i != self.num_layers - 1: - x = F.elu(x) - x = F.dropout(x, p=0.5, training=self.training) - return x.log_softmax(dim=-1) - - def inference(self, x_all): - pbar = tqdm(total=x_all.size(0) * self.num_layers) - pbar.set_description('Evaluating') - - # Compute representations of nodes layer by layer, using *all* - # available edges. This leads to faster computation in contrast to - # immediately computing the final representations of each batch. - total_edges = 0 - for i in range(self.num_layers): - xs = [] - for batch_size, n_id, adj in subgraph_loader: - edge_index, _, size = adj.to(device) - total_edges += edge_index.size(1) - x = x_all[n_id].to(device) - x_target = x[:size[1]] - x = self.convs[i]((x, x_target), edge_index) - x = x + self.skips[i](x_target) - - if i != self.num_layers - 1: - x = F.elu(x) - xs.append(x.cpu()) - - pbar.update(batch_size) - - x_all = torch.cat(xs, dim=0) - - pbar.close() - - return x_all - - -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') -model = GAT(dataset.num_features, 128, dataset.num_classes, num_layers=3, - heads=4) -model = model.to(device) - -x = data.x.to(device) -y = data.y.squeeze().to(device) - - -def train(epoch): - model.train() - - pbar = tqdm(total=train_idx.size(0)) - pbar.set_description(f'Epoch {epoch:02d}') - - total_loss = total_correct = 0 - for batch_size, n_id, adjs in train_loader: - # `adjs` holds a list of `(edge_index, e_id, size)` tuples. - adjs = [adj.to(device) for adj in adjs] - - optimizer.zero_grad() - out = model(x[n_id], adjs) - loss = F.nll_loss(out, y[n_id[:batch_size]]) - loss.backward() - optimizer.step() - - total_loss += float(loss) - total_correct += int(out.argmax(dim=-1).eq(y[n_id[:batch_size]]).sum()) - pbar.update(batch_size) - - pbar.close() - - loss = total_loss / len(train_loader) - approx_acc = total_correct / train_idx.size(0) - - return loss, approx_acc - - -@torch.no_grad() -def test(): - model.eval() - - out = model.inference(x) - - y_true = y.cpu().unsqueeze(-1) - y_pred = out.argmax(dim=-1, keepdim=True) - - train_acc = evaluator.eval({ - 'y_true': y_true[split_idx['train']], - 'y_pred': y_pred[split_idx['train']], - })['acc'] - val_acc = evaluator.eval({ - 'y_true': y_true[split_idx['valid']], - 'y_pred': y_pred[split_idx['valid']], - })['acc'] - test_acc = evaluator.eval({ - 'y_true': y_true[split_idx['test']], - 'y_pred': y_pred[split_idx['test']], - })['acc'] - - return train_acc, val_acc, test_acc - - -test_accs = [] -for run in range(1, 11): - print('') - print(f'Run {run:02d}:') - print('') - - model.reset_parameters() - optimizer = torch.optim.Adam(model.parameters(), lr=0.001) - - best_val_acc = final_test_acc = 0 - for epoch in range(1, 101): - loss, acc = train(epoch) - print(f'Epoch {epoch:02d}, Loss: {loss:.4f}, Approx. 
Train: {acc:.4f}') - - if epoch > 50 and epoch % 10 == 0: - train_acc, val_acc, test_acc = test() - print(f'Train: {train_acc:.4f}, Val: {val_acc:.4f}, ' - f'Test: {test_acc:.4f}') - - if val_acc > best_val_acc: - best_val_acc = val_acc - final_test_acc = test_acc - test_accs.append(final_test_acc) - -test_acc = torch.tensor(test_accs) -print('============================') -print(f'Final Test: {test_acc.mean():.4f} ± {test_acc.std():.4f}') diff --git a/pytorch_geometric-2.3.1/examples/ogbn_products_sage.py b/pytorch_geometric-2.3.1/examples/ogbn_products_sage.py deleted file mode 100644 index 0ff2406..0000000 --- a/pytorch_geometric-2.3.1/examples/ogbn_products_sage.py +++ /dev/null @@ -1,176 +0,0 @@ -# Reaches around 0.7870 ± 0.0036 test accuracy. - -import os.path as osp - -import torch -import torch.nn.functional as F -from ogb.nodeproppred import Evaluator, PygNodePropPredDataset -from tqdm import tqdm - -from torch_geometric.loader import NeighborSampler -from torch_geometric.nn import SAGEConv - -root = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'products') -dataset = PygNodePropPredDataset('ogbn-products', root) -split_idx = dataset.get_idx_split() -evaluator = Evaluator(name='ogbn-products') -data = dataset[0] - -train_idx = split_idx['train'] -train_loader = NeighborSampler(data.edge_index, node_idx=train_idx, - sizes=[15, 10, 5], batch_size=1024, - shuffle=True, num_workers=12) -subgraph_loader = NeighborSampler(data.edge_index, node_idx=None, sizes=[-1], - batch_size=4096, shuffle=False, - num_workers=12) - - -class SAGE(torch.nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels, num_layers): - super().__init__() - - self.num_layers = num_layers - - self.convs = torch.nn.ModuleList() - self.convs.append(SAGEConv(in_channels, hidden_channels)) - for _ in range(num_layers - 2): - self.convs.append(SAGEConv(hidden_channels, hidden_channels)) - self.convs.append(SAGEConv(hidden_channels, out_channels)) - - def reset_parameters(self): - for conv in self.convs: - conv.reset_parameters() - - def forward(self, x, adjs): - # `train_loader` computes the k-hop neighborhood of a batch of nodes, - # and returns, for each layer, a bipartite graph object, holding the - # bipartite edges `edge_index`, the index `e_id` of the original edges, - # and the size/shape `size` of the bipartite graph. - # Target nodes are also included in the source nodes so that one can - # easily apply skip-connections or add self-loops. - for i, (edge_index, _, size) in enumerate(adjs): - x_target = x[:size[1]] # Target nodes are always placed first. - x = self.convs[i]((x, x_target), edge_index) - if i != self.num_layers - 1: - x = F.relu(x) - x = F.dropout(x, p=0.5, training=self.training) - return x.log_softmax(dim=-1) - - def inference(self, x_all): - pbar = tqdm(total=x_all.size(0) * self.num_layers) - pbar.set_description('Evaluating') - - # Compute representations of nodes layer by layer, using *all* - # available edges. This leads to faster computation in contrast to - # immediately computing the final representations of each batch. 
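Put differently: with L layers, one layer-wise sweep processes every edge exactly L times in total, whereas computing each batch's final embedding directly would re-expand the overlapping k-hop neighborhoods and redo the same aggregations many times over. The full-batch special case of the sweep, sketched on a hypothetical toy graph that fits in memory:
```
import torch
from torch_geometric.nn import SAGEConv

x_all = torch.randn(6, 8)  # 6 nodes with 8 features
edge_index = torch.tensor([[0, 1, 1, 2, 2, 3, 3, 4, 4, 5],
                           [1, 0, 2, 1, 3, 2, 4, 3, 5, 4]])
convs = torch.nn.ModuleList([SAGEConv(8, 16), SAGEConv(16, 4)])

with torch.no_grad():
    for i, conv in enumerate(convs):  # finish layer i for *all* nodes...
        x_all = conv(x_all, edge_index)
        if i != len(convs) - 1:       # ...before starting layer i + 1
            x_all = x_all.relu()
```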
- total_edges = 0 - for i in range(self.num_layers): - xs = [] - for batch_size, n_id, adj in subgraph_loader: - edge_index, _, size = adj.to(device) - total_edges += edge_index.size(1) - x = x_all[n_id].to(device) - x_target = x[:size[1]] - x = self.convs[i]((x, x_target), edge_index) - if i != self.num_layers - 1: - x = F.relu(x) - xs.append(x.cpu()) - - pbar.update(batch_size) - - x_all = torch.cat(xs, dim=0) - - pbar.close() - - return x_all - - -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') -model = SAGE(dataset.num_features, 256, dataset.num_classes, num_layers=3) -model = model.to(device) - -x = data.x.to(device) -y = data.y.squeeze().to(device) - - -def train(epoch): - model.train() - - pbar = tqdm(total=train_idx.size(0)) - pbar.set_description(f'Epoch {epoch:02d}') - - total_loss = total_correct = 0 - for batch_size, n_id, adjs in train_loader: - # `adjs` holds a list of `(edge_index, e_id, size)` tuples. - adjs = [adj.to(device) for adj in adjs] - - optimizer.zero_grad() - out = model(x[n_id], adjs) - loss = F.nll_loss(out, y[n_id[:batch_size]]) - loss.backward() - optimizer.step() - - total_loss += float(loss) - total_correct += int(out.argmax(dim=-1).eq(y[n_id[:batch_size]]).sum()) - pbar.update(batch_size) - - pbar.close() - - loss = total_loss / len(train_loader) - approx_acc = total_correct / train_idx.size(0) - - return loss, approx_acc - - -@torch.no_grad() -def test(): - model.eval() - - out = model.inference(x) - - y_true = y.cpu().unsqueeze(-1) - y_pred = out.argmax(dim=-1, keepdim=True) - - train_acc = evaluator.eval({ - 'y_true': y_true[split_idx['train']], - 'y_pred': y_pred[split_idx['train']], - })['acc'] - val_acc = evaluator.eval({ - 'y_true': y_true[split_idx['valid']], - 'y_pred': y_pred[split_idx['valid']], - })['acc'] - test_acc = evaluator.eval({ - 'y_true': y_true[split_idx['test']], - 'y_pred': y_pred[split_idx['test']], - })['acc'] - - return train_acc, val_acc, test_acc - - -test_accs = [] -for run in range(1, 11): - print('') - print(f'Run {run:02d}:') - print('') - - model.reset_parameters() - optimizer = torch.optim.Adam(model.parameters(), lr=0.003) - - best_val_acc = final_test_acc = 0 - for epoch in range(1, 21): - loss, acc = train(epoch) - print(f'Epoch {epoch:02d}, Loss: {loss:.4f}, Approx. Train: {acc:.4f}') - - if epoch > 5: - train_acc, val_acc, test_acc = test() - print(f'Train: {train_acc:.4f}, Val: {val_acc:.4f}, ' - f'Test: {test_acc:.4f}') - - if val_acc > best_val_acc: - best_val_acc = val_acc - final_test_acc = test_acc - test_accs.append(final_test_acc) - -test_acc = torch.tensor(test_accs) -print('============================') -print(f'Final Test: {test_acc.mean():.4f} ± {test_acc.std():.4f}') diff --git a/pytorch_geometric-2.3.1/examples/rect.py b/pytorch_geometric-2.3.1/examples/rect.py deleted file mode 100644 index a89e6ff..0000000 --- a/pytorch_geometric-2.3.1/examples/rect.py +++ /dev/null @@ -1,66 +0,0 @@ -import argparse -import copy -import os.path as osp - -import torch -from sklearn.linear_model import LogisticRegression - -import torch_geometric.transforms as T -from torch_geometric.datasets import Planetoid -from torch_geometric.nn import RECT_L - -# RECT focuses on the zero-shot, i.e. completely-imbalanced label setting: -# For this, we first remove "unseen" classes from the training set and train a -# RECT (or more specifically its supervised part RECT-L) model in the zero-shot -# label scenario. 
Lastly, we train a simple classifier to evaluate the final -# performance of the embeddings based on the original labels. - -# Datasets Citeseer Cora Pubmed -# Unseen Classes [1, 2, 5] [3, 4] [1, 2, 3] [3, 4, 6] [2] -# RECT-L 66.30 68.20 74.60 71.20 75.30 -# GCN 51.80 55.70 55.80 57.10 59.80 -# NodeFeats 61.40 61.40 57.50 57.50 73.10 - -parser = argparse.ArgumentParser() -parser.add_argument('--dataset', type=str, default='Cora', - choices=['Cora', 'CiteSeer', 'PubMed']) -parser.add_argument('--unseen-classes', type=int, nargs='*', default=[1, 2, 3]) -args = parser.parse_args() - -path = osp.join(osp.dirname(osp.realpath(__file__)), '../data/Planetoid') -train_mask_original = Planetoid(path, args.dataset)[0].train_mask.clone() -transform = T.Compose([ - T.NormalizeFeatures(), - T.SVDFeatureReduction(200), - T.GDC(), -]) -dataset = Planetoid(path, args.dataset, transform=transform) -data = dataset[0] -zs_data = T.RemoveTrainingClasses(args.unseen_classes)(copy.copy(data)) - -model = RECT_L(200, 200, normalize=False, dropout=0.0) -zs_data.y = model.get_semantic_labels(zs_data.x, zs_data.y, zs_data.train_mask) - -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') -model, zs_data = model.to(device), zs_data.to(device) - -criterion = torch.nn.MSELoss(reduction='sum') -optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=5e-4) - -model.train() -for epoch in range(1, 201): - optimizer.zero_grad() - out = model(zs_data.x, zs_data.edge_index, zs_data.edge_attr) - loss = criterion(out[zs_data.train_mask], zs_data.y) - loss.backward() - optimizer.step() - print(f'Epoch {epoch:03d}, Loss {loss:.4f}') - -model.eval() -with torch.no_grad(): - h = model.embed(zs_data.x, zs_data.edge_index, zs_data.edge_attr).cpu() - -reg = LogisticRegression() -reg.fit(h[data.train_mask].numpy(), data.y[data.train_mask].numpy()) -test_acc = reg.score(h[data.test_mask].numpy(), data.y[data.test_mask].numpy()) -print(f'Test Acc: {test_acc:.4f}') diff --git a/pytorch_geometric-2.3.1/examples/rev_gnn.py b/pytorch_geometric-2.3.1/examples/rev_gnn.py deleted file mode 100644 index aef5fcd..0000000 --- a/pytorch_geometric-2.3.1/examples/rev_gnn.py +++ /dev/null @@ -1,197 +0,0 @@ -# Peak GPU memory usage is around 1.57 G -# | RevGNN Models | Test Acc | Val Acc | -# |-------------------------|-----------------|-----------------| -# | 112 layers 160 channels | 0.8307 ± 0.0030 | 0.9290 ± 0.0007 | -# | 7 layers 160 channels | 0.8276 ± 0.0027 | 0.9272 ± 0.0006 | - -import os.path as osp - -import torch -import torch.nn.functional as F -from torch.nn import LayerNorm, Linear -from tqdm import tqdm - -import torch_geometric.transforms as T -from torch_geometric.loader import RandomNodeLoader -from torch_geometric.nn import GroupAddRev, SAGEConv -from torch_geometric.typing import SparseTensor -from torch_geometric.utils import index_to_mask - - -class GNNBlock(torch.nn.Module): - def __init__(self, in_channels, out_channels): - super().__init__() - self.norm = LayerNorm(in_channels, elementwise_affine=True) - self.conv = SAGEConv(in_channels, out_channels) - - def reset_parameters(self): - self.norm.reset_parameters() - self.conv.reset_parameters() - - def forward(self, x, edge_index, dropout_mask=None): - x = self.norm(x).relu() - if self.training and dropout_mask is not None: - x = x * dropout_mask - return self.conv(x, edge_index) - - -class RevGNN(torch.nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels, num_layers, - dropout, num_groups=2): - super().__init__() - - 
self.dropout = dropout - - self.lin1 = Linear(in_channels, hidden_channels) - self.lin2 = Linear(hidden_channels, out_channels) - self.norm = LayerNorm(hidden_channels, elementwise_affine=True) - - assert hidden_channels % num_groups == 0 - self.convs = torch.nn.ModuleList() - for _ in range(num_layers): - conv = GNNBlock( - hidden_channels // num_groups, - hidden_channels // num_groups, - ) - self.convs.append(GroupAddRev(conv, num_groups=num_groups)) - - def reset_parameters(self): - self.lin1.reset_parameters() - self.lin2.reset_parameters() - self.norm.reset_parameters() - for conv in self.convs: - conv.reset_parameters() - - def forward(self, x, edge_index): - x = self.lin1(x) - - # Generate a dropout mask which will be shared across GNN blocks: - mask = None - if self.training and self.dropout > 0: - mask = torch.zeros_like(x).bernoulli_(1 - self.dropout) - mask = mask.requires_grad_(False) - mask = mask / (1 - self.dropout) - - for conv in self.convs: - x = conv(x, edge_index, mask) - x = self.norm(x).relu() - x = F.dropout(x, p=self.dropout, training=self.training) - return self.lin2(x) - - -from ogb.nodeproppred import Evaluator, PygNodePropPredDataset # noqa - -transform = T.AddSelfLoops() -root = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'products') -dataset = PygNodePropPredDataset('ogbn-products', root, transform=transform) -evaluator = Evaluator(name='ogbn-products') - -data = dataset[0] -split_idx = dataset.get_idx_split() -for split in ['train', 'valid', 'test']: - data[f'{split}_mask'] = index_to_mask(split_idx[split], data.y.shape[0]) - -train_loader = RandomNodeLoader(data, num_parts=10, shuffle=True, - num_workers=5) -# Increase the num_parts of the test loader if you cannot fit -# the full batch graph into your GPU: -test_loader = RandomNodeLoader(data, num_parts=1, num_workers=5) - -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') -model = RevGNN( - in_channels=dataset.num_features, - hidden_channels=160, - out_channels=dataset.num_classes, - num_layers=7, # You can try 1000 layers for fun - dropout=0.5, - num_groups=2, -).to(device) -optimizer = torch.optim.Adam(model.parameters(), lr=0.003) - - -def train(epoch): - model.train() - - pbar = tqdm(total=len(train_loader)) - pbar.set_description(f'Training epoch: {epoch:03d}') - - total_loss = total_examples = 0 - for data in train_loader: - data = data.to(device) - optimizer.zero_grad() - - # Memory-efficient aggregations: - adj_t = SparseTensor.from_edge_index(data.edge_index).t() - out = model(data.x, adj_t)[data.train_mask] - loss = F.cross_entropy(out, data.y[data.train_mask].view(-1)) - loss.backward() - optimizer.step() - - total_loss += float(loss) * int(data.train_mask.sum()) - total_examples += int(data.train_mask.sum()) - pbar.update(1) - - pbar.close() - - return total_loss / total_examples - - -@torch.no_grad() -def test(epoch): - model.eval() - - y_true = {"train": [], "valid": [], "test": []} - y_pred = {"train": [], "valid": [], "test": []} - - pbar = tqdm(total=len(test_loader)) - pbar.set_description(f'Evaluating epoch: {epoch:03d}') - - for data in test_loader: - data = data.to(device) - - # Memory-efficient aggregations - adj_t = SparseTensor.from_edge_index(data.edge_index).t() - out = model(data.x, adj_t).argmax(dim=-1, keepdim=True) - - for split in ['train', 'valid', 'test']: - mask = data[f'{split}_mask'] - y_true[split].append(data.y[mask].cpu()) - y_pred[split].append(out[mask].cpu()) - - pbar.update(1) - - pbar.close() - - train_acc = evaluator.eval({ - 
'y_true': torch.cat(y_true['train'], dim=0), - 'y_pred': torch.cat(y_pred['train'], dim=0), - })['acc'] - - valid_acc = evaluator.eval({ - 'y_true': torch.cat(y_true['valid'], dim=0), - 'y_pred': torch.cat(y_pred['valid'], dim=0), - })['acc'] - - test_acc = evaluator.eval({ - 'y_true': torch.cat(y_true['test'], dim=0), - 'y_pred': torch.cat(y_pred['test'], dim=0), - })['acc'] - - return train_acc, valid_acc, test_acc - - -best_val = 0.0 -final_train = 0.0 -final_test = 0.0 -for epoch in range(1, 1001): - loss = train(epoch) - train_acc, val_acc, test_acc = test(epoch) - if val_acc > best_val: - best_val = val_acc - final_train = train_acc - final_test = test_acc - print(f'Loss: {loss:.4f}, Train: {train_acc:.4f}, Val: {val_acc:.4f}, ' - f'Test: {test_acc:.4f}') - -print(f'Final Train: {final_train:.4f}, Best Val: {best_val:.4f}, ' - f'Final Test: {final_test:.4f}') diff --git a/pytorch_geometric-2.3.1/examples/rgcn.py b/pytorch_geometric-2.3.1/examples/rgcn.py deleted file mode 100644 index 00c738c..0000000 --- a/pytorch_geometric-2.3.1/examples/rgcn.py +++ /dev/null @@ -1,82 +0,0 @@ -import argparse -import os.path as osp - -import torch -import torch.nn.functional as F - -from torch_geometric.datasets import Entities -from torch_geometric.nn import FastRGCNConv, RGCNConv -from torch_geometric.utils import k_hop_subgraph - -parser = argparse.ArgumentParser() -parser.add_argument('--dataset', type=str, default='AIFB', - choices=['AIFB', 'MUTAG', 'BGS', 'AM']) -args = parser.parse_args() - -# Trade memory consumption for faster computation. -if args.dataset in ['AIFB', 'MUTAG']: - RGCNConv = FastRGCNConv - -path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'Entities') -dataset = Entities(path, args.dataset) -data = dataset[0] - -# BGS and AM graphs are too big to process them in a full-batch fashion. -# Since our model does only make use of a rather small receptive field, we -# filter the graph to only contain the nodes that are at most 2-hop neighbors -# away from any training/test node. 
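The `k_hop_subgraph` call that follows performs this filtering in one shot. Its return values on a hypothetical 5-node undirected path look like this:
```
import torch
from torch_geometric.utils import k_hop_subgraph

edge_index = torch.tensor([[0, 1, 1, 2, 2, 3, 3, 4],
                           [1, 0, 2, 1, 3, 2, 4, 3]])  # path 0-1-2-3-4
subset, sub_edge_index, mapping, edge_mask = k_hop_subgraph(
    torch.tensor([0]), 2, edge_index, relabel_nodes=True)
# subset == tensor([0, 1, 2]): nodes at most 2 hops from the seed node 0
# mapping == tensor([0]): where each seed node landed after relabeling
# edge_mask: boolean mask over the original edges kept in the subgraph
```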
-node_idx = torch.cat([data.train_idx, data.test_idx], dim=0) -node_idx, edge_index, mapping, edge_mask = k_hop_subgraph( - node_idx, 2, data.edge_index, relabel_nodes=True) - -data.num_nodes = node_idx.size(0) -data.edge_index = edge_index -data.edge_type = data.edge_type[edge_mask] -data.train_idx = mapping[:data.train_idx.size(0)] -data.test_idx = mapping[data.train_idx.size(0):] - - -class Net(torch.nn.Module): - def __init__(self): - super().__init__() - self.conv1 = RGCNConv(data.num_nodes, 16, dataset.num_relations, - num_bases=30) - self.conv2 = RGCNConv(16, dataset.num_classes, dataset.num_relations, - num_bases=30) - - def forward(self, edge_index, edge_type): - x = F.relu(self.conv1(None, edge_index, edge_type)) - x = self.conv2(x, edge_index, edge_type) - return F.log_softmax(x, dim=1) - - -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') -device = torch.device('cpu') if args.dataset == 'AM' else device -model, data = Net().to(device), data.to(device) -optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=0.0005) - - -def train(): - model.train() - optimizer.zero_grad() - out = model(data.edge_index, data.edge_type) - loss = F.nll_loss(out[data.train_idx], data.train_y) - loss.backward() - optimizer.step() - return float(loss) - - -@torch.no_grad() -def test(): - model.eval() - pred = model(data.edge_index, data.edge_type).argmax(dim=-1) - train_acc = float((pred[data.train_idx] == data.train_y).float().mean()) - test_acc = float((pred[data.test_idx] == data.test_y).float().mean()) - return train_acc, test_acc - - -for epoch in range(1, 51): - loss = train() - train_acc, test_acc = test() - print(f'Epoch: {epoch:02d}, Loss: {loss:.4f}, Train: {train_acc:.4f} ' - f'Test: {test_acc:.4f}') diff --git a/pytorch_geometric-2.3.1/examples/sgc.py b/pytorch_geometric-2.3.1/examples/sgc.py deleted file mode 100644 index 087d231..0000000 --- a/pytorch_geometric-2.3.1/examples/sgc.py +++ /dev/null @@ -1,58 +0,0 @@ -import os.path as osp - -import torch -import torch.nn.functional as F - -from torch_geometric.datasets import Planetoid -from torch_geometric.nn import SGConv - -dataset = 'Cora' -path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', dataset) -dataset = Planetoid(path, dataset) -data = dataset[0] - - -class Net(torch.nn.Module): - def __init__(self): - super().__init__() - self.conv1 = SGConv(dataset.num_features, dataset.num_classes, K=2, - cached=True) - - def forward(self): - x, edge_index = data.x, data.edge_index - x = self.conv1(x, edge_index) - return F.log_softmax(x, dim=1) - - -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') -model, data = Net().to(device), data.to(device) -optimizer = torch.optim.Adam(model.parameters(), lr=0.2, weight_decay=0.005) - - -def train(): - model.train() - optimizer.zero_grad() - F.nll_loss(model()[data.train_mask], data.y[data.train_mask]).backward() - optimizer.step() - - -@torch.no_grad() -def test(): - model.eval() - out, accs = model(), [] - for _, mask in data('train_mask', 'val_mask', 'test_mask'): - pred = out[mask].argmax(1) - acc = pred.eq(data.y[mask]).sum().item() / mask.sum().item() - accs.append(acc) - return accs - - -best_val_acc = test_acc = 0 -for epoch in range(1, 101): - train() - train_acc, val_acc, tmp_test_acc = test() - if val_acc > best_val_acc: - best_val_acc = val_acc - test_acc = tmp_test_acc - print(f'Epoch: {epoch:03d}, Train: {train_acc:.4f}, ' - f'Val: {best_val_acc:.4f}, Test: {test_acc:.4f}') diff --git 
a/pytorch_geometric-2.3.1/examples/tgn.py b/pytorch_geometric-2.3.1/examples/tgn.py
deleted file mode 100644
index a7c5259..0000000
--- a/pytorch_geometric-2.3.1/examples/tgn.py
+++ /dev/null
@@ -1,201 +0,0 @@
-# This code achieves a performance of around 96.60%. However, it is not
-# directly comparable to the results reported by the TGN paper since a
-# slightly different evaluation setup is used here.
-# In particular, predictions in the same batch are made in parallel, i.e.
-# predictions for interactions later in the batch have no access to any
-# information whatsoever about previous interactions in the same batch.
-# On the contrary, when sampling node neighborhoods for interactions later in
-# the batch, the TGN paper code has access to previous interactions in the
-# batch.
-# While both approaches are correct, together with the authors of the paper we
-# decided to present this version here as it is more realistic and a better
-# test bed for future methods.
-
-import os.path as osp
-
-import torch
-from sklearn.metrics import average_precision_score, roc_auc_score
-from torch.nn import Linear
-
-from torch_geometric.datasets import JODIEDataset
-from torch_geometric.loader import TemporalDataLoader
-from torch_geometric.nn import TGNMemory, TransformerConv
-from torch_geometric.nn.models.tgn import (
-    IdentityMessage,
-    LastAggregator,
-    LastNeighborLoader,
-)
-
-device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-
-path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'JODIE')
-dataset = JODIEDataset(path, name='wikipedia')
-data = dataset[0]
-
-# For small datasets, we can put the whole dataset on GPU and thus avoid
-# expensive memory transfer costs for mini-batches:
-data = data.to(device)
-
-# Ensure to only sample actual destination nodes as negatives.
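In this dataset the destination (page) nodes occupy their own id range, so drawing uniformly between the smallest and largest destination id, as the two lines below set up and `train()`/`test()` later use, yields only plausible corrupt destinations rather than arbitrary nodes. With hypothetical ids:
```
import torch

min_dst_idx, max_dst_idx = 100, 109  # hypothetical destination id range
src = torch.tensor([3, 7, 1])        # one negative per positive interaction
neg_dst = torch.randint(min_dst_idx, max_dst_idx + 1, (src.size(0), ),
                        dtype=torch.long)
```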
-min_dst_idx, max_dst_idx = int(data.dst.min()), int(data.dst.max()) -train_data, val_data, test_data = data.train_val_test_split( - val_ratio=0.15, test_ratio=0.15) - -train_loader = TemporalDataLoader(train_data, batch_size=200) -val_loader = TemporalDataLoader(val_data, batch_size=200) -test_loader = TemporalDataLoader(test_data, batch_size=200) - -neighbor_loader = LastNeighborLoader(data.num_nodes, size=10, device=device) - - -class GraphAttentionEmbedding(torch.nn.Module): - def __init__(self, in_channels, out_channels, msg_dim, time_enc): - super().__init__() - self.time_enc = time_enc - edge_dim = msg_dim + time_enc.out_channels - self.conv = TransformerConv(in_channels, out_channels // 2, heads=2, - dropout=0.1, edge_dim=edge_dim) - - def forward(self, x, last_update, edge_index, t, msg): - rel_t = last_update[edge_index[0]] - t - rel_t_enc = self.time_enc(rel_t.to(x.dtype)) - edge_attr = torch.cat([rel_t_enc, msg], dim=-1) - return self.conv(x, edge_index, edge_attr) - - -class LinkPredictor(torch.nn.Module): - def __init__(self, in_channels): - super().__init__() - self.lin_src = Linear(in_channels, in_channels) - self.lin_dst = Linear(in_channels, in_channels) - self.lin_final = Linear(in_channels, 1) - - def forward(self, z_src, z_dst): - h = self.lin_src(z_src) + self.lin_dst(z_dst) - h = h.relu() - return self.lin_final(h) - - -memory_dim = time_dim = embedding_dim = 100 - -memory = TGNMemory( - data.num_nodes, - data.msg.size(-1), - memory_dim, - time_dim, - message_module=IdentityMessage(data.msg.size(-1), memory_dim, time_dim), - aggregator_module=LastAggregator(), -).to(device) - -gnn = GraphAttentionEmbedding( - in_channels=memory_dim, - out_channels=embedding_dim, - msg_dim=data.msg.size(-1), - time_enc=memory.time_enc, -).to(device) - -link_pred = LinkPredictor(in_channels=embedding_dim).to(device) - -optimizer = torch.optim.Adam( - set(memory.parameters()) | set(gnn.parameters()) - | set(link_pred.parameters()), lr=0.0001) -criterion = torch.nn.BCEWithLogitsLoss() - -# Helper vector to map global node indices to local ones. -assoc = torch.empty(data.num_nodes, dtype=torch.long, device=device) - - -def train(): - memory.train() - gnn.train() - link_pred.train() - - memory.reset_state() # Start with a fresh memory. - neighbor_loader.reset_state() # Start with an empty graph. - - total_loss = 0 - for batch in train_loader: - batch = batch.to(device) - optimizer.zero_grad() - - src, pos_dst, t, msg = batch.src, batch.dst, batch.t, batch.msg - - # Sample negative destination nodes. - neg_dst = torch.randint(min_dst_idx, max_dst_idx + 1, (src.size(0), ), - dtype=torch.long, device=device) - - n_id = torch.cat([src, pos_dst, neg_dst]).unique() - n_id, edge_index, e_id = neighbor_loader(n_id) - assoc[n_id] = torch.arange(n_id.size(0), device=device) - - # Get updated memory of all nodes involved in the computation. - z, last_update = memory(n_id) - z = gnn(z, last_update, edge_index, data.t[e_id].to(device), - data.msg[e_id].to(device)) - - pos_out = link_pred(z[assoc[src]], z[assoc[pos_dst]]) - neg_out = link_pred(z[assoc[src]], z[assoc[neg_dst]]) - - loss = criterion(pos_out, torch.ones_like(pos_out)) - loss += criterion(neg_out, torch.zeros_like(neg_out)) - - # Update memory and neighbor loader with ground-truth state. 
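Two details matter at this point: the memory is written back with the observed interactions rather than the model's predictions, and `memory.detach()` a few lines below truncates backpropagation at the batch boundary so each step's cost stays bounded. The same truncated-backprop pattern in miniature, with a GRU cell standing in for `TGNMemory` (purely illustrative):
```
import torch

cell = torch.nn.GRUCell(4, 8)  # stand-in for the memory updater
state = torch.zeros(1, 8)      # running state carried across batches
opt = torch.optim.Adam(cell.parameters(), lr=1e-3)
for _ in range(3):             # three "batches" of events
    state = cell(torch.randn(1, 4), state)
    loss = state.pow(2).mean()  # stand-in objective
    opt.zero_grad()
    loss.backward()
    opt.step()
    state = state.detach()     # stop gradients at the batch boundary
```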
- memory.update_state(src, pos_dst, t, msg) - neighbor_loader.insert(src, pos_dst) - - loss.backward() - optimizer.step() - memory.detach() - total_loss += float(loss) * batch.num_events - - return total_loss / train_data.num_events - - -@torch.no_grad() -def test(loader): - memory.eval() - gnn.eval() - link_pred.eval() - - torch.manual_seed(12345) # Ensure deterministic sampling across epochs. - - aps, aucs = [], [] - for batch in loader: - batch = batch.to(device) - src, pos_dst, t, msg = batch.src, batch.dst, batch.t, batch.msg - - neg_dst = torch.randint(min_dst_idx, max_dst_idx + 1, (src.size(0), ), - dtype=torch.long, device=device) - - n_id = torch.cat([src, pos_dst, neg_dst]).unique() - n_id, edge_index, e_id = neighbor_loader(n_id) - assoc[n_id] = torch.arange(n_id.size(0), device=device) - - z, last_update = memory(n_id) - z = gnn(z, last_update, edge_index, data.t[e_id].to(device), - data.msg[e_id].to(device)) - - pos_out = link_pred(z[assoc[src]], z[assoc[pos_dst]]) - neg_out = link_pred(z[assoc[src]], z[assoc[neg_dst]]) - - y_pred = torch.cat([pos_out, neg_out], dim=0).sigmoid().cpu() - y_true = torch.cat( - [torch.ones(pos_out.size(0)), - torch.zeros(neg_out.size(0))], dim=0) - - aps.append(average_precision_score(y_true, y_pred)) - aucs.append(roc_auc_score(y_true, y_pred)) - - memory.update_state(src, pos_dst, t, msg) - neighbor_loader.insert(src, pos_dst) - - return float(torch.tensor(aps).mean()), float(torch.tensor(aucs).mean()) - - -for epoch in range(1, 51): - loss = train() - print(f'Epoch: {epoch:02d}, Loss: {loss:.4f}') - val_ap, val_auc = test(val_loader) - test_ap, test_auc = test(test_loader) - print(f'Val AP: {val_ap:.4f}, Val AUC: {val_auc:.4f}') - print(f'Test AP: {test_ap:.4f}, Test AUC: {test_auc:.4f}') diff --git a/pytorch_geometric-2.3.1/pyproject.toml b/pytorch_geometric-2.3.1/pyproject.toml deleted file mode 100644 index a063673..0000000 --- a/pytorch_geometric-2.3.1/pyproject.toml +++ /dev/null @@ -1,104 +0,0 @@ -[project] -name="torch_geometric" -version="2.3.0" -authors=[ - {name="Matthias Fey", email="matthias@pyg.org"}, -] -description="Graph Neural Network Library for PyTorch" -readme="README.md" -requires-python=">=3.7" -keywords=[ - "deep-learning", - "pytorch", - "geometric-deep-learning", - "graph-neural-networks", - "graph-convolutional-networks", -] -classifiers=[ - "Development Status :: 5 - Production/Stable", - "License :: OSI Approved :: MIT License", - "Programming Language :: Python", - "Programming Language :: Python :: 3.7", - "Programming Language :: Python :: 3.8", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Programming Language :: Python :: 3 :: Only", -] - -dynamic=["dependencies", "optional-dependencies"] - -[project.urls] -homepage="https://pyg.org" -documentation="https://pytorch-geometric.readthedocs.io" -repository="https://github.com/pyg-team/pytorch_geometric.git" -changelog="https://github.com/pyg-team/pytorch_geometric/blob/master/CHANGELOG.md" - -[tool.yapf] -based_on_style = "pep8" -split_before_named_assigns = false -blank_line_before_nested_class_or_def = false - -[tool.pyright] -include = ["torch_geometric/utils/*"] - -[tool.isort] -multi_line_output = 3 -include_trailing_comma = true -skip = [".gitingore", "__init__.py"] - -[tool.pytest.ini_options] -addopts = "--capture=no" -filterwarnings = [ - "ignore:distutils:DeprecationWarning", - "ignore:'torch_geometric.contrib' contains experimental code:UserWarning", - # Filter 
`torch` warnings: - "ignore:The PyTorch API of nested tensors is in prototype stage:UserWarning", - "ignore:scatter_reduce():UserWarning", - "ignore:Sparse CSR tensor support is in beta state:UserWarning", - "ignore:Sparse CSC tensor support is in beta state:UserWarning", - "ignore:torch.distributed._sharded_tensor will be deprecated:DeprecationWarning", - # Filter `captum` warnings: - "ignore:Setting backward hooks on ReLU activations:UserWarning", - "ignore:.*did not already require gradients, required_grads has been set automatically:UserWarning", - # Filter `pytorch_lightning` warnings: - "ignore:GPU available but not used:UserWarning", -] - -[tool.pylint.messages_control] -disable = [ - "import-outside-toplevel", - "missing-module-docstring", - "missing-class-docstring", - "missing-function-docstring", - "empty-docstring", - "import-error", - "too-many-arguments", - "arguments-differ", - "invalid-name", - "redefined-builtin", -] -attr-rgx = "[A-Za-z_][A-Za-z0-9_]*$" -argument-rgx = "[A-Za-z_][A-Za-z0-9_]*$" -variable-rgx = "[A-Za-z_][A-Za-z0-9_]*$" -generated-members = ["torch.*"] - -[tool.coverage.run] -source = ["torch_geometric"] -omit = [ - "torch_geometric/datasets/*", - "torch_geometric/data/extract.py", - "torch_geometric/nn/data_parallel.py", -] - -[tool.coverage.report] -exclude_lines = [ - "pragma: no cover", - "pass", - "raise", - "except", - "register_parameter", - "warn", - "torch.cuda.is_available", - "WITH_PT2", -] diff --git a/pytorch_geometric-2.3.1/setup.cfg b/pytorch_geometric-2.3.1/setup.cfg deleted file mode 100644 index 9f1f504..0000000 --- a/pytorch_geometric-2.3.1/setup.cfg +++ /dev/null @@ -1,9 +0,0 @@ -[aliases] -test=pytest - -[flake8] -ignore= - # ignore overload redefinition - F811, - # allow line breaks before/after binary operators - W503, W504, diff --git a/pytorch_geometric-2.3.1/setup.py b/pytorch_geometric-2.3.1/setup.py deleted file mode 100644 index dd69899..0000000 --- a/pytorch_geometric-2.3.1/setup.py +++ /dev/null @@ -1,81 +0,0 @@ -from setuptools import find_packages, setup - -__version__ = '2.3.0' - -install_requires = [ - 'tqdm', - 'numpy', - 'scipy', - 'jinja2', - 'requests', - 'pyparsing', - 'scikit-learn', - 'psutil>=5.8.0', -] - -graphgym_requires = [ - 'yacs', - 'hydra-core', - 'protobuf<4.21', - 'pytorch-lightning', -] - -modelhub_requires = [ - 'huggingface_hub', -] - -full_requires = graphgym_requires + modelhub_requires + [ - 'ase', - 'h5py', - 'numba', - 'sympy', - 'pandas', - 'captum', - 'rdflib', - 'trimesh', - 'networkx', - 'graphviz', - 'tabulate', - 'matplotlib', - 'torchmetrics', - 'scikit-image', - 'pytorch-memlab', - 'pgmpy', - 'opt_einsum', # required for pgmpy - 'statsmodels', -] - -benchmark_requires = [ - 'protobuf<4.21', - 'wandb', - 'pandas', - 'networkx', - 'matplotlib', -] - -test_requires = [ - 'pytest', - 'pytest-cov', - 'onnx', - 'onnxruntime', -] - -dev_requires = test_requires + [ - 'pre-commit', -] - -setup( - name='torch_geometric', - version=__version__, - install_requires=install_requires, - extras_require={ - 'graphgym': graphgym_requires, - 'modelhub': modelhub_requires, - 'full': full_requires, - 'benchmark': benchmark_requires, - 'test': test_requires, - 'dev': dev_requires, - }, - packages=find_packages(), - include_package_data=True, # Ensure that `*.jinja` files are found. 
-) diff --git a/pytorch_geometric-2.3.1/test/contrib/explain/test_graphmask_explainer.py b/pytorch_geometric-2.3.1/test/contrib/explain/test_graphmask_explainer.py deleted file mode 100644 index ba5f2b6..0000000 --- a/pytorch_geometric-2.3.1/test/contrib/explain/test_graphmask_explainer.py +++ /dev/null @@ -1,77 +0,0 @@ -import pytest -import torch - -from torch_geometric.contrib.explain import GraphMaskExplainer -from torch_geometric.explain import Explainer -from torch_geometric.explain.config import ( - ModelConfig, - ModelMode, - ModelTaskLevel, -) -from torch_geometric.nn import GCNConv, global_add_pool - - -class GCN(torch.nn.Module): - def __init__(self, model_config: ModelConfig): - super().__init__() - self.model_config = model_config - assert model_config.mode == ModelMode.binary_classification - - self.conv1 = GCNConv(3, 16) - self.conv2 = GCNConv(16, 1) - - def forward(self, x, edge_index, batch=None, edge_label_index=None): - x = self.conv1(x, edge_index).relu() - x = self.conv2(x, edge_index) - - if self.model_config.task_level == ModelTaskLevel.graph: - x = global_add_pool(x, batch) - elif self.model_config.task_level == ModelTaskLevel.edge: - assert edge_label_index is not None - x = x[edge_label_index[0]] * x[edge_label_index[1]] - - return x - - -x = torch.randn(8, 3) -edge_index = torch.tensor([ - [0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7], - [1, 0, 2, 1, 3, 2, 4, 3, 5, 4, 6, 5, 7, 6], -]) -batch = torch.tensor([0, 0, 0, 1, 1, 2, 2, 2]) -edge_label_index = torch.tensor([[0, 1, 2], [3, 4, 5]]) - - -@pytest.mark.parametrize('task_level', ['node', 'edge', 'graph']) -def test_graph_mask_explainer(task_level): - model_config = ModelConfig( - mode='binary_classification', - task_level=task_level, - return_type='raw', - ) - - model = GCN(model_config) - - explainer = Explainer( - model=model, - algorithm=GraphMaskExplainer(2, epochs=5, log=False), - explanation_type='model', - node_mask_type='attributes', - edge_mask_type='object', - model_config=model_config, - ) - - explanation = explainer( - x, - edge_index, - batch=batch, - edge_label_index=edge_label_index, - ) - - assert explanation.node_mask.size() == explanation.x.size() - assert explanation.node_mask.min() >= 0 - assert explanation.node_mask.max() <= 1 - - assert explanation.edge_mask.size() == (explanation.num_edges, ) - assert explanation.edge_mask.min() >= 0 - assert explanation.edge_mask.max() <= 1 diff --git a/pytorch_geometric-2.3.1/test/data/test_batch.py b/pytorch_geometric-2.3.1/test/data/test_batch.py deleted file mode 100644 index 62b836e..0000000 --- a/pytorch_geometric-2.3.1/test/data/test_batch.py +++ /dev/null @@ -1,426 +0,0 @@ -import os.path as osp - -import numpy as np -import torch -from torch_sparse import SparseTensor - -import torch_geometric -from torch_geometric.data import Batch, Data, HeteroData -from torch_geometric.testing import get_random_edge_index - - -def test_batch(): - torch_geometric.set_debug(True) - - x1 = torch.tensor([1, 2, 3], dtype=torch.float) - y1 = 1 - x1_sp = SparseTensor.from_dense(x1.view(-1, 1)) - e1 = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) - adj1 = SparseTensor.from_edge_index(e1) - s1 = '1' - array1 = ['1', '2'] - x2 = torch.tensor([1, 2], dtype=torch.float) - y2 = 2 - x2_sp = SparseTensor.from_dense(x2.view(-1, 1)) - e2 = torch.tensor([[0, 1], [1, 0]]) - adj2 = SparseTensor.from_edge_index(e2) - s2 = '2' - array2 = ['3', '4', '5'] - x3 = torch.tensor([1, 2, 3, 4], dtype=torch.float) - y3 = 3 - x3_sp = SparseTensor.from_dense(x3.view(-1, 1)) - e3 = 
torch.tensor([[0, 1, 1, 2, 2, 3], [1, 0, 2, 1, 3, 2]]) - adj3 = SparseTensor.from_edge_index(e3) - s3 = '3' - array3 = ['6', '7', '8', '9'] - - data1 = Data(x=x1, y=y1, x_sp=x1_sp, edge_index=e1, adj=adj1, s=s1, - array=array1, num_nodes=3) - data2 = Data(x=x2, y=y2, x_sp=x2_sp, edge_index=e2, adj=adj2, s=s2, - array=array2, num_nodes=2) - data3 = Data(x=x3, y=y3, x_sp=x3_sp, edge_index=e3, adj=adj3, s=s3, - array=array3, num_nodes=4) - - batch = Batch.from_data_list([data1]) - assert str(batch) == ('DataBatch(x=[3], edge_index=[2, 4], y=[1], ' - 'x_sp=[3, 1, nnz=3], adj=[3, 3, nnz=4], s=[1], ' - 'array=[1], num_nodes=3, batch=[3], ptr=[2])') - assert batch.num_graphs == len(batch) == 1 - assert batch.x.tolist() == [1, 2, 3] - assert batch.y.tolist() == [1] - assert batch.x_sp.to_dense().view(-1).tolist() == batch.x.tolist() - assert batch.edge_index.tolist() == [[0, 1, 1, 2], [1, 0, 2, 1]] - edge_index = torch.stack(batch.adj.coo()[:2], dim=0) - assert edge_index.tolist() == batch.edge_index.tolist() - assert batch.s == ['1'] - assert batch.array == [['1', '2']] - assert batch.num_nodes == 3 - assert batch.batch.tolist() == [0, 0, 0] - assert batch.ptr.tolist() == [0, 3] - - batch = Batch.from_data_list([data1, data2, data3], follow_batch=['s']) - - assert str(batch) == ('DataBatch(x=[9], edge_index=[2, 12], y=[3], ' - 'x_sp=[9, 1, nnz=9], adj=[9, 9, nnz=12], s=[3], ' - 's_batch=[3], s_ptr=[4], array=[3], num_nodes=9, ' - 'batch=[9], ptr=[4])') - assert batch.num_graphs == len(batch) == 3 - assert batch.x.tolist() == [1, 2, 3, 1, 2, 1, 2, 3, 4] - assert batch.y.tolist() == [1, 2, 3] - assert batch.x_sp.to_dense().view(-1).tolist() == batch.x.tolist() - assert batch.edge_index.tolist() == [[0, 1, 1, 2, 3, 4, 5, 6, 6, 7, 7, 8], - [1, 0, 2, 1, 4, 3, 6, 5, 7, 6, 8, 7]] - edge_index = torch.stack(batch.adj.coo()[:2], dim=0) - assert edge_index.tolist() == batch.edge_index.tolist() - assert batch.s == ['1', '2', '3'] - assert batch.s_batch.tolist() == [0, 1, 2] - assert batch.s_ptr.tolist() == [0, 1, 2, 3] - assert batch.array == [['1', '2'], ['3', '4', '5'], ['6', '7', '8', '9']] - assert batch.num_nodes == 9 - assert batch.batch.tolist() == [0, 0, 0, 1, 1, 2, 2, 2, 2] - assert batch.ptr.tolist() == [0, 3, 5, 9] - - data = batch[0] - assert str(data) == ("Data(x=[3], edge_index=[2, 4], y=[1], " - "x_sp=[3, 1, nnz=3], adj=[3, 3, nnz=4], s='1', " - "array=[2], num_nodes=3)") - data = batch[1] - assert str(data) == ("Data(x=[2], edge_index=[2, 2], y=[1], " - "x_sp=[2, 1, nnz=2], adj=[2, 2, nnz=2], s='2', " - "array=[3], num_nodes=2)") - - data = batch[2] - assert str(data) == ("Data(x=[4], edge_index=[2, 6], y=[1], " - "x_sp=[4, 1, nnz=4], adj=[4, 4, nnz=6], s='3', " - "array=[4], num_nodes=4)") - - assert len(batch.index_select([1, 0])) == 2 - assert len(batch.index_select(torch.tensor([1, 0]))) == 2 - assert len(batch.index_select(torch.tensor([True, False]))) == 1 - assert len(batch.index_select(np.array([1, 0], dtype=np.int64))) == 2 - assert len(batch.index_select(np.array([True, False]))) == 1 - assert len(batch[:2]) == 2 - - data_list = batch.to_data_list() - assert len(data_list) == 3 - - assert len(data_list[0]) == 8 - assert data_list[0].x.tolist() == [1, 2, 3] - assert data_list[0].y.tolist() == [1] - assert data_list[0].x_sp.to_dense().view(-1).tolist() == [1, 2, 3] - assert data_list[0].edge_index.tolist() == [[0, 1, 1, 2], [1, 0, 2, 1]] - edge_index = torch.stack(data_list[0].adj.coo()[:2], dim=0) - assert edge_index.tolist() == data_list[0].edge_index.tolist() - assert 
data_list[0].s == '1' - assert data_list[0].array == ['1', '2'] - assert data_list[0].num_nodes == 3 - - assert len(data_list[1]) == 8 - assert data_list[1].x.tolist() == [1, 2] - assert data_list[1].y.tolist() == [2] - assert data_list[1].x_sp.to_dense().view(-1).tolist() == [1, 2] - assert data_list[1].edge_index.tolist() == [[0, 1], [1, 0]] - edge_index = torch.stack(data_list[1].adj.coo()[:2], dim=0) - assert edge_index.tolist() == data_list[1].edge_index.tolist() - assert data_list[1].s == '2' - assert data_list[1].array == ['3', '4', '5'] - assert data_list[1].num_nodes == 2 - - assert len(data_list[2]) == 8 - assert data_list[2].x.tolist() == [1, 2, 3, 4] - assert data_list[2].y.tolist() == [3] - assert data_list[2].x_sp.to_dense().view(-1).tolist() == [1, 2, 3, 4] - assert data_list[2].edge_index.tolist() == [[0, 1, 1, 2, 2, 3], - [1, 0, 2, 1, 3, 2]] - edge_index = torch.stack(data_list[2].adj.coo()[:2], dim=0) - assert edge_index.tolist() == data_list[2].edge_index.tolist() - assert data_list[2].s == '3' - assert data_list[2].array == ['6', '7', '8', '9'] - assert data_list[2].num_nodes == 4 - - torch_geometric.set_debug(True) - - -def test_batching_with_new_dimension(): - torch_geometric.set_debug(True) - - class MyData(Data): - def __cat_dim__(self, key, value, *args, **kwargs): - if key == 'foo': - return None - else: - return super().__cat_dim__(key, value, *args, **kwargs) - - x1 = torch.tensor([1, 2, 3], dtype=torch.float) - foo1 = torch.randn(4) - y1 = torch.tensor(1) - - x2 = torch.tensor([1, 2], dtype=torch.float) - foo2 = torch.randn(4) - y2 = torch.tensor(2) - - batch = Batch.from_data_list( - [MyData(x=x1, foo=foo1, y=y1), - MyData(x=x2, foo=foo2, y=y2)]) - - assert str(batch) == ('MyDataBatch(x=[5], y=[2], foo=[2, 4], batch=[5], ' - 'ptr=[3])') - assert batch.num_graphs == len(batch) == 2 - assert batch.x.tolist() == [1, 2, 3, 1, 2] - assert batch.foo.size() == (2, 4) - assert batch.foo[0].tolist() == foo1.tolist() - assert batch.foo[1].tolist() == foo2.tolist() - assert batch.y.tolist() == [1, 2] - assert batch.batch.tolist() == [0, 0, 0, 1, 1] - assert batch.ptr.tolist() == [0, 3, 5] - assert batch.num_graphs == 2 - - data = batch[0] - assert str(data) == ('MyData(x=[3], y=[1], foo=[4])') - data = batch[1] - assert str(data) == ('MyData(x=[2], y=[1], foo=[4])') - - torch_geometric.set_debug(True) - - -def test_pickling(tmp_path): - data = Data(x=torch.randn(5, 16)) - batch = Batch.from_data_list([data, data, data, data]) - assert id(batch._store._parent()) == id(batch) - assert batch.num_nodes == 20 - - # filename = f'{random.randrange(sys.maxsize)}.pt' - path = osp.join(tmp_path, 'batch.pt') - torch.save(batch, path) - assert id(batch._store._parent()) == id(batch) - assert batch.num_nodes == 20 - - batch = torch.load(path) - assert id(batch._store._parent()) == id(batch) - assert batch.num_nodes == 20 - - assert batch.__class__.__name__ == 'DataBatch' - assert batch.num_graphs == len(batch) == 4 - - -def test_recursive_batch(): - data1 = Data( - x={ - '1': torch.randn(10, 32), - '2': torch.randn(20, 48) - }, - edge_index=[ - get_random_edge_index(30, 30, 50), - get_random_edge_index(30, 30, 70) - ], - num_nodes=30, - ) - - data2 = Data( - x={ - '1': torch.randn(20, 32), - '2': torch.randn(40, 48) - }, - edge_index=[ - get_random_edge_index(60, 60, 80), - get_random_edge_index(60, 60, 90) - ], - num_nodes=60, - ) - - batch = Batch.from_data_list([data1, data2]) - - assert batch.num_graphs == len(batch) == 2 - assert batch.num_nodes == 90 - - assert 
torch.allclose(batch.x['1'], - torch.cat([data1.x['1'], data2.x['1']], dim=0)) - assert torch.allclose(batch.x['2'], - torch.cat([data1.x['2'], data2.x['2']], dim=0)) - assert (batch.edge_index[0].tolist() == torch.cat( - [data1.edge_index[0], data2.edge_index[0] + 30], dim=1).tolist()) - assert (batch.edge_index[1].tolist() == torch.cat( - [data1.edge_index[1], data2.edge_index[1] + 30], dim=1).tolist()) - assert batch.batch.size() == (90, ) - assert batch.ptr.size() == (3, ) - - out1 = batch[0] - assert len(out1) == 3 - assert out1.num_nodes == 30 - assert torch.allclose(out1.x['1'], data1.x['1']) - assert torch.allclose(out1.x['2'], data1.x['2']) - assert out1.edge_index[0].tolist(), data1.edge_index[0].tolist() - assert out1.edge_index[1].tolist(), data1.edge_index[1].tolist() - - out2 = batch[1] - assert len(out2) == 3 - assert out2.num_nodes == 60 - assert torch.allclose(out2.x['1'], data2.x['1']) - assert torch.allclose(out2.x['2'], data2.x['2']) - assert out2.edge_index[0].tolist(), data2.edge_index[0].tolist() - assert out2.edge_index[1].tolist(), data2.edge_index[1].tolist() - - -def test_batching_of_batches(): - data = Data(x=torch.randn(2, 16)) - batch = Batch.from_data_list([data, data]) - - batch = Batch.from_data_list([batch, batch]) - assert batch.num_graphs == len(batch) == 2 - assert batch.x[0:2].tolist() == data.x.tolist() - assert batch.x[2:4].tolist() == data.x.tolist() - assert batch.x[4:6].tolist() == data.x.tolist() - assert batch.x[6:8].tolist() == data.x.tolist() - assert batch.batch.tolist() == [0, 0, 1, 1, 2, 2, 3, 3] - - -def test_hetero_batch(): - e1 = ('p', 'a') - e2 = ('a', 'p') - data1 = HeteroData() - data1['p'].x = torch.randn(100, 128) - data1['a'].x = torch.randn(200, 128) - data1[e1].edge_index = get_random_edge_index(100, 200, 500) - data1[e1].edge_attr = torch.randn(500, 32) - data1[e2].edge_index = get_random_edge_index(200, 100, 400) - data1[e2].edge_attr = torch.randn(400, 32) - - data2 = HeteroData() - data2['p'].x = torch.randn(50, 128) - data2['a'].x = torch.randn(100, 128) - data2[e1].edge_index = get_random_edge_index(50, 100, 300) - data2[e1].edge_attr = torch.randn(300, 32) - data2[e2].edge_index = get_random_edge_index(100, 50, 200) - data2[e2].edge_attr = torch.randn(200, 32) - - batch = Batch.from_data_list([data1, data2]) - - assert batch.num_graphs == len(batch) == 2 - assert batch.num_nodes == 450 - - assert torch.allclose(batch['p'].x[:100], data1['p'].x) - assert torch.allclose(batch['a'].x[:200], data1['a'].x) - assert torch.allclose(batch['p'].x[100:], data2['p'].x) - assert torch.allclose(batch['a'].x[200:], data2['a'].x) - assert (batch[e1].edge_index.tolist() == torch.cat([ - data1[e1].edge_index, - data2[e1].edge_index + torch.tensor([[100], [200]]) - ], 1).tolist()) - assert torch.allclose( - batch[e1].edge_attr, - torch.cat([data1[e1].edge_attr, data2[e1].edge_attr], 0)) - assert (batch[e2].edge_index.tolist() == torch.cat([ - data1[e2].edge_index, - data2[e2].edge_index + torch.tensor([[200], [100]]) - ], 1).tolist()) - assert torch.allclose( - batch[e2].edge_attr, - torch.cat([data1[e2].edge_attr, data2[e2].edge_attr], 0)) - assert batch['p'].batch.size() == (150, ) - assert batch['p'].ptr.size() == (3, ) - assert batch['a'].batch.size() == (300, ) - assert batch['a'].ptr.size() == (3, ) - - out1 = batch[0] - assert len(out1) == 3 - assert out1.num_nodes == 300 - assert torch.allclose(out1['p'].x, data1['p'].x) - assert torch.allclose(out1['a'].x, data1['a'].x) - assert out1[e1].edge_index.tolist() == 
data1[e1].edge_index.tolist() - assert torch.allclose(out1[e1].edge_attr, data1[e1].edge_attr) - assert out1[e2].edge_index.tolist() == data1[e2].edge_index.tolist() - assert torch.allclose(out1[e2].edge_attr, data1[e2].edge_attr) - - out2 = batch[1] - assert len(out2) == 3 - assert out2.num_nodes == 150 - assert torch.allclose(out2['p'].x, data2['p'].x) - assert torch.allclose(out2['a'].x, data2['a'].x) - assert out2[e1].edge_index.tolist() == data2[e1].edge_index.tolist() - assert torch.allclose(out2[e1].edge_attr, data2[e1].edge_attr) - assert out2[e2].edge_index.tolist() == data2[e2].edge_index.tolist() - assert torch.allclose(out2[e2].edge_attr, data2[e2].edge_attr) - - -def test_pair_data_batching(): - class PairData(Data): - def __inc__(self, key, value, *args, **kwargs): - if key == 'edge_index_s': - return self.x_s.size(0) - if key == 'edge_index_t': - return self.x_t.size(0) - else: - return super().__inc__(key, value, *args, **kwargs) - - x_s = torch.randn(5, 16) - edge_index_s = torch.tensor([ - [0, 0, 0, 0], - [1, 2, 3, 4], - ]) - x_t = torch.randn(4, 16) - edge_index_t = torch.tensor([ - [0, 0, 0], - [1, 2, 3], - ]) - - data = PairData(x_s=x_s, edge_index_s=edge_index_s, x_t=x_t, - edge_index_t=edge_index_t) - batch = Batch.from_data_list([data, data]) - - assert torch.allclose(batch.x_s, torch.cat([x_s, x_s], dim=0)) - assert batch.edge_index_s.tolist() == [[0, 0, 0, 0, 5, 5, 5, 5], - [1, 2, 3, 4, 6, 7, 8, 9]] - - assert torch.allclose(batch.x_t, torch.cat([x_t, x_t], dim=0)) - assert batch.edge_index_t.tolist() == [[0, 0, 0, 4, 4, 4], - [1, 2, 3, 5, 6, 7]] - - -def test_batch_with_empty_list(): - x = torch.randn(4, 1) - edge_index = torch.tensor([[0, 1, 1, 2, 2, 3], [1, 0, 2, 1, 3, 2]]) - data = Data(x=x, edge_index=edge_index, nontensor=[]) - - batch = Batch.from_data_list([data, data]) - assert batch.nontensor == [[], []] - assert batch[0].nontensor == [] - assert batch[1].nontensor == [] - - -def test_nested_follow_batch(): - def tr(n, m): - return torch.rand((n, m)) - - d1 = Data(xs=[tr(4, 3), tr(11, 4), tr(1, 2)], a={"aa": tr(11, 3)}, - x=tr(10, 5)) - d2 = Data(xs=[tr(5, 3), tr(14, 4), tr(3, 2)], a={"aa": tr(2, 3)}, - x=tr(11, 5)) - d3 = Data(xs=[tr(6, 3), tr(15, 4), tr(2, 2)], a={"aa": tr(4, 3)}, - x=tr(9, 5)) - d4 = Data(xs=[tr(4, 3), tr(16, 4), tr(1, 2)], a={"aa": tr(8, 3)}, - x=tr(8, 5)) - - # Dataset - data_list = [d1, d2, d3, d4] - - batch = Batch.from_data_list(data_list, follow_batch=['xs', 'a']) - - # assert shapes - assert batch.xs[0].shape == (19, 3) - assert batch.xs[1].shape == (56, 4) - assert batch.xs[2].shape == (7, 2) - assert batch.a['aa'].shape == (25, 3) - - assert len(batch.xs_batch) == 3 - assert len(batch.a_batch) == 1 - - # assert _batch - assert batch.xs_batch[0].tolist() == \ - [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3] - assert batch.xs_batch[1].tolist() == \ - [0] * 11 + [1] * 14 + [2] * 15 + [3] * 16 - assert batch.xs_batch[2].tolist() == \ - [0] * 1 + [1] * 3 + [2] * 2 + [3] * 1 - - assert batch.a_batch['aa'].tolist() == \ - [0] * 11 + [1] * 2 + [2] * 4 + [3] * 8 diff --git a/pytorch_geometric-2.3.1/test/data/test_dataset.py b/pytorch_geometric-2.3.1/test/data/test_dataset.py deleted file mode 100644 index abe0e06..0000000 --- a/pytorch_geometric-2.3.1/test/data/test_dataset.py +++ /dev/null @@ -1,328 +0,0 @@ -import copy - -import pytest -import torch -from torch_sparse import SparseTensor - -from torch_geometric.data import Data, HeteroData, InMemoryDataset - - -class MyTestDataset(InMemoryDataset): - def __init__(self, 
data_list, transform=None): - super().__init__('/tmp/MyTestDataset', transform=transform) - self.data, self.slices = self.collate(data_list) - - -def test_in_memory_dataset(): - x1 = torch.Tensor([[1], [1], [1]]) - x2 = torch.Tensor([[2], [2], [2]]) - edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) - face = torch.tensor([[0], [1], [2]]) - - data1 = Data(x1, edge_index, face=face, test_int=1, test_str='1') - data1.num_nodes = 10 - - data2 = Data(x2, edge_index, face=face, test_int=2, test_str='2') - data2.num_nodes = 5 - - dataset = MyTestDataset([data1, data2]) - assert str(dataset) == 'MyTestDataset(2)' - assert len(dataset) == 2 - - assert len(dataset[0]) == 6 - assert dataset[0].num_nodes == 10 - assert dataset[0].x.tolist() == x1.tolist() - assert dataset[0].edge_index.tolist() == edge_index.tolist() - assert dataset[0].face.tolist() == face.tolist() - assert dataset[0].test_int == 1 - assert dataset[0].test_str == '1' - - assert len(dataset[1]) == 6 - assert dataset[1].num_nodes == 5 - assert dataset[1].x.tolist() == x2.tolist() - assert dataset[1].edge_index.tolist() == edge_index.tolist() - assert dataset[1].face.tolist() == face.tolist() - assert dataset[1].test_int == 2 - assert dataset[1].test_str == '2' - - with pytest.warns(UserWarning, match="internal storage format"): - dataset.data - - assert torch.equal(dataset.x, torch.cat([x1, x2], dim=0)) - assert dataset.edge_index.tolist() == [ - [0, 1, 1, 2, 10, 11, 11, 12], - [1, 0, 2, 1, 11, 10, 12, 11], - ] - assert torch.equal(dataset[1:].x, x2) - - -def test_in_memory_num_classes(): - dataset = MyTestDataset([Data(), Data()]) - assert dataset.num_classes == 0 - - dataset = MyTestDataset([Data(y=0), Data(y=1)]) - assert dataset.num_classes == 2 - - dataset = MyTestDataset([Data(y=1.5), Data(y=2.5), Data(y=3.5)]) - assert dataset.num_classes == 3 - - dataset = MyTestDataset([ - Data(y=torch.tensor([[0, 1, 0, 1]])), - Data(y=torch.tensor([[1, 0, 0, 0]])), - Data(y=torch.tensor([[0, 0, 1, 0]])), - ]) - assert dataset.num_classes == 4 - - # Test when `__getitem__` returns a tuple of data objects. 
- def transform(data): - copied_data = copy.copy(data) - copied_data.y += 1 - return data, copied_data, 'foo' - - dataset = MyTestDataset([Data(y=0), Data(y=1)], transform=transform) - assert dataset.num_classes == 3 - - -def test_in_memory_dataset_copy(): - data_list = [Data(x=torch.randn(5, 16)) for _ in range(4)] - dataset = MyTestDataset(data_list) - - copied_dataset = dataset.copy() - assert id(copied_dataset) != id(dataset) - - assert len(copied_dataset) == len(dataset) == 4 - for copied_data, data in zip(copied_dataset, dataset): - assert torch.equal(copied_data.x, data.x) - - copied_dataset = dataset.copy([1, 2]) - assert len(copied_dataset) == 2 - assert torch.equal(copied_dataset[0].x, data_list[1].x) - assert torch.equal(copied_dataset[1].x, data_list[2].x) - - -def test_to_datapipe(): - x = torch.randn(3, 8) - edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) - data = Data(x=x, edge_index=edge_index) - dataset = MyTestDataset([data, data]) - - dp = dataset.to_datapipe() - - assert isinstance(dp, torch.utils.data.IterDataPipe) - assert len(dp) == 2 - - assert torch.equal(dataset[0].x, list(dp)[0].x) - assert torch.equal(dataset[0].edge_index, list(dp)[0].edge_index) - assert torch.equal(dataset[1].x, list(dp)[1].x) - assert torch.equal(dataset[1].edge_index, list(dp)[1].edge_index) - - -def test_in_memory_sparse_tensor_dataset(): - x = torch.randn(11, 16) - adj = SparseTensor( - row=torch.tensor([4, 1, 3, 2, 2, 3]), - col=torch.tensor([1, 3, 2, 3, 3, 2]), - sparse_sizes=(11, 11), - ) - data = Data(x=x, adj=adj) - - dataset = MyTestDataset([data, data]) - assert len(dataset) == 2 - assert torch.allclose(dataset[0].x, x) - assert dataset[0].adj.sparse_sizes() == (11, 11) - assert torch.allclose(dataset[1].x, x) - assert dataset[1].adj.sparse_sizes() == (11, 11) - - -def test_collate_with_new_dimension(): - class MyData(Data): - def __cat_dim__(self, key, value, *args, **kwargs): - if key == 'foo': - return None - else: - return super().__cat_dim__(key, value, *args, **kwargs) - - x = torch.tensor([1, 2, 3], dtype=torch.float) - foo = torch.randn(4) - y = torch.tensor(1) - - data = MyData(x=x, foo=foo, y=y) - - dataset = MyTestDataset([data, data]) - assert str(dataset) == 'MyTestDataset(2)' - assert len(dataset) == 2 - - data1 = dataset[0] - assert len(data1) == 3 - assert data1.x.tolist() == x.tolist() - assert data1.foo.tolist() == foo.tolist() - assert data1.y.tolist() == [1] - - data2 = dataset[0] - assert len(data2) == 3 - assert data2.x.tolist() == x.tolist() - assert data2.foo.tolist() == foo.tolist() - assert data2.y.tolist() == [1] - - -def test_hetero_in_memory_dataset(): - data1 = HeteroData() - data1.y = torch.randn(5) - data1['paper'].x = torch.randn(10, 16) - data1['paper', 'paper'].edge_index = torch.randint(0, 10, (2, 30)).long() - - data2 = HeteroData() - data2.y = torch.randn(5) - data2['paper'].x = torch.randn(10, 16) - data2['paper', 'paper'].edge_index = torch.randint(0, 10, (2, 30)).long() - - dataset = MyTestDataset([data1, data2]) - assert str(dataset) == 'MyTestDataset(2)' - assert len(dataset) == 2 - - assert len(dataset[0]) == 3 - assert dataset[0].y.tolist() == data1.y.tolist() - assert dataset[0]['paper'].x.tolist() == data1['paper'].x.tolist() - assert (dataset[0]['paper', 'paper'].edge_index.tolist() == data1[ - 'paper', 'paper'].edge_index.tolist()) - - assert len(dataset[1]) == 3 - assert dataset[1].y.tolist() == data2.y.tolist() - assert dataset[1]['paper'].x.tolist() == data2['paper'].x.tolist() - assert (dataset[1]['paper', 
'paper'].edge_index.tolist() == data2[ - 'paper', 'paper'].edge_index.tolist()) - - -def test_override_behavior(): - class DS1(InMemoryDataset): - def __init__(self): - self.enter_download = False - self.enter_process = False - super().__init__() - - def _download(self): - self.enter_download = True - - def _process(self): - self.enter_process = True - - def download(self): - pass - - def process(self): - pass - - class DS2(InMemoryDataset): - def __init__(self): - self.enter_download = False - self.enter_process = False - super().__init__() - - def _download(self): - self.enter_download = True - - def _process(self): - self.enter_process = True - - def process(self): - pass - - class DS3(InMemoryDataset): - def __init__(self): - self.enter_download = False - self.enter_process = False - super().__init__() - - def _download(self): - self.enter_download = True - - def _process(self): - self.enter_process = True - - class DS4(DS1): - pass - - ds = DS1() - assert ds.enter_download - assert ds.enter_process - - ds = DS2() - assert not ds.enter_download - assert ds.enter_process - - ds = DS3() - assert not ds.enter_download - assert not ds.enter_process - - ds = DS4() - assert ds.enter_download - assert ds.enter_process - - -def test_lists_of_tensors_in_memory_dataset(): - def tr(n, m): - return torch.rand((n, m)) - - d1 = Data(xs=[tr(4, 3), tr(11, 4), tr(1, 2)]) - d2 = Data(xs=[tr(5, 3), tr(14, 4), tr(3, 2)]) - d3 = Data(xs=[tr(6, 3), tr(15, 4), tr(2, 2)]) - d4 = Data(xs=[tr(4, 3), tr(16, 4), tr(1, 2)]) - - data_list = [d1, d2, d3, d4] - - dataset = MyTestDataset(data_list) - assert len(dataset) == 4 - assert dataset[0].xs[1].size() == (11, 4) - assert dataset[0].xs[2].size() == (1, 2) - assert dataset[1].xs[0].size() == (5, 3) - assert dataset[2].xs[1].size() == (15, 4) - assert dataset[3].xs[1].size() == (16, 4) - - -def test_lists_of_SparseTensors(): - e1 = torch.tensor([[4, 1, 3, 2, 2, 3], [1, 3, 2, 3, 3, 2]]) - e2 = torch.tensor([[0, 1, 4, 7, 2, 9], [7, 2, 2, 1, 4, 7]]) - e3 = torch.tensor([[3, 5, 1, 2, 3, 3], [5, 0, 2, 1, 3, 7]]) - e4 = torch.tensor([[0, 1, 9, 2, 0, 3], [1, 1, 2, 1, 3, 2]]) - adj1 = SparseTensor.from_edge_index(e1, sparse_sizes=(11, 11)) - adj2 = SparseTensor.from_edge_index(e2, sparse_sizes=(22, 22)) - adj3 = SparseTensor.from_edge_index(e3, sparse_sizes=(12, 12)) - adj4 = SparseTensor.from_edge_index(e4, sparse_sizes=(15, 15)) - - d1 = Data(adj_test=[adj1, adj2]) - d2 = Data(adj_test=[adj3, adj4]) - - data_list = [d1, d2] - dataset = MyTestDataset(data_list) - assert len(dataset) == 2 - assert dataset[0].adj_test[0].sparse_sizes() == (11, 11) - assert dataset[0].adj_test[1].sparse_sizes() == (22, 22) - assert dataset[1].adj_test[0].sparse_sizes() == (12, 12) - assert dataset[1].adj_test[1].sparse_sizes() == (15, 15) - - -def test_file_names_as_property_and_method(): - class MyTestDataset(InMemoryDataset): - def __init__(self): - super().__init__('/tmp/MyTestDataset') - - @property - def raw_file_names(self): - return ['test_file'] - - def download(self): - pass - - MyTestDataset() - - class MyTestDataset(InMemoryDataset): - def __init__(self): - super().__init__('/tmp/MyTestDataset') - - def raw_file_names(self): - return ['test_file'] - - def download(self): - pass - - MyTestDataset() diff --git a/pytorch_geometric-2.3.1/test/data/test_dataset_summary.py b/pytorch_geometric-2.3.1/test/data/test_dataset_summary.py deleted file mode 100644 index 4ebee67..0000000 --- a/pytorch_geometric-2.3.1/test/data/test_dataset_summary.py +++ /dev/null @@ -1,45 +0,0 @@ -import torch 
- -from torch_geometric.data.summary import Summary -from torch_geometric.datasets import FakeDataset, FakeHeteroDataset -from torch_geometric.testing import withPackage - - -def test_dataset_summary(): - dataset = FakeDataset(num_graphs=10) - num_nodes = torch.Tensor([data.num_nodes for data in dataset]) - num_edges = torch.Tensor([data.num_edges for data in dataset]) - - summary = dataset.get_summary() - - assert summary.name == 'FakeDataset' - assert summary.num_graphs == 10 - - assert summary.num_nodes.mean == num_nodes.mean().item() - assert summary.num_nodes.std == num_nodes.std().item() - assert summary.num_nodes.min == num_nodes.min().item() - assert summary.num_nodes.quantile25 == num_nodes.quantile(0.25).item() - assert summary.num_nodes.median == num_nodes.median().item() - assert summary.num_nodes.quantile75 == num_nodes.quantile(0.75).item() - assert summary.num_nodes.max == num_nodes.max().item() - - assert summary.num_edges.mean == num_edges.mean().item() - assert summary.num_edges.std == num_edges.std().item() - assert summary.num_edges.min == num_edges.min().item() - assert summary.num_edges.quantile25 == num_edges.quantile(0.25).item() - assert summary.num_edges.median == num_edges.median().item() - assert summary.num_edges.quantile75 == num_edges.quantile(0.75).item() - assert summary.num_edges.max == num_edges.max().item() - - -@withPackage('tabulate') -def test_dataset_summary_hetero(): - dataset1 = FakeHeteroDataset(num_graphs=10) - summary1 = Summary.from_dataset(dataset1) - - dataset2 = [data.to_homogeneous() for data in dataset1] - summary2 = Summary.from_dataset(dataset2) - summary2.name = 'FakeHeteroDataset' - - assert summary1 == summary2 - assert str(summary1) == str(summary2) diff --git a/pytorch_geometric-2.3.1/test/loader/test_cluster.py b/pytorch_geometric-2.3.1/test/loader/test_cluster.py deleted file mode 100644 index 1e9e0bf..0000000 --- a/pytorch_geometric-2.3.1/test/loader/test_cluster.py +++ /dev/null @@ -1,102 +0,0 @@ -import pytest -import torch - -from torch_geometric.data import Data -from torch_geometric.loader import ClusterData, ClusterLoader -from torch_geometric.utils import to_dense_adj - -try: - rowptr = torch.tensor([0, 1]) - col = torch.tensor([0]) - torch.ops.torch_sparse.partition(rowptr, col, None, 1, True) - with_metis = True -except RuntimeError: - with_metis = False - - -@pytest.mark.skipif(not with_metis, reason='Not compiled with METIS support') -def test_cluster_gcn(): - adj = torch.tensor([ - [1, 1, 1, 0, 1, 0], - [1, 1, 0, 1, 0, 1], - [1, 0, 1, 0, 1, 0], - [0, 1, 0, 1, 0, 1], - [1, 0, 1, 0, 1, 0], - [0, 1, 0, 1, 0, 1], - ]) - - x = torch.Tensor([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]) - edge_index = adj.nonzero(as_tuple=False).t() - edge_attr = torch.arange(edge_index.size(1)) - data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr) - data.num_nodes = 6 - - cluster_data = ClusterData(data, num_parts=2, log=False) - - assert cluster_data.partptr.tolist() == [0, 3, 6] - assert cluster_data.perm.tolist() == [0, 2, 4, 1, 3, 5] - assert cluster_data.data.x.tolist() == [ - [0, 0], - [2, 2], - [4, 4], - [1, 1], - [3, 3], - [5, 5], - ] - assert cluster_data.data.adj.to_dense().tolist() == [ - [0, 2, 3, 1, 0, 0], - [8, 9, 10, 0, 0, 0], - [14, 15, 16, 0, 0, 0], - [4, 0, 0, 5, 6, 7], - [0, 0, 0, 11, 12, 13], - [0, 0, 0, 17, 18, 19], - ] - - data = cluster_data[0] - assert data.num_nodes == 3 - assert data.x.tolist() == [[0, 0], [2, 2], [4, 4]] - assert data.edge_index.tolist() == [[0, 0, 0, 1, 1, 1, 2, 2, 2], - [0, 1, 2, 0, 
1, 2, 0, 1, 2]] - assert data.edge_attr.tolist() == [0, 2, 3, 8, 9, 10, 14, 15, 16] - - data = cluster_data[1] - assert data.num_nodes == 3 - assert data.x.tolist() == [[1, 1], [3, 3], [5, 5]] - assert data.edge_index.tolist() == [[0, 0, 0, 1, 1, 1, 2, 2, 2], - [0, 1, 2, 0, 1, 2, 0, 1, 2]] - assert data.edge_attr.tolist() == [5, 6, 7, 11, 12, 13, 17, 18, 19] - - loader = ClusterLoader(cluster_data, batch_size=1) - iterator = iter(loader) - - data = next(iterator) - assert data.x.tolist() == [[0, 0], [2, 2], [4, 4]] - assert data.edge_index.tolist() == [[0, 0, 0, 1, 1, 1, 2, 2, 2], - [0, 1, 2, 0, 1, 2, 0, 1, 2]] - assert data.edge_attr.tolist() == [0, 2, 3, 8, 9, 10, 14, 15, 16] - - data = next(iterator) - assert data.x.tolist() == [[1, 1], [3, 3], [5, 5]] - assert data.edge_index.tolist() == [[0, 0, 0, 1, 1, 1, 2, 2, 2], - [0, 1, 2, 0, 1, 2, 0, 1, 2]] - assert data.edge_attr.tolist() == [5, 6, 7, 11, 12, 13, 17, 18, 19] - - loader = ClusterLoader(cluster_data, batch_size=2, shuffle=False) - data = next(iter(loader)) - assert data.num_nodes == 6 - assert data.x.tolist() == [ - [0, 0], - [2, 2], - [4, 4], - [1, 1], - [3, 3], - [5, 5], - ] - assert to_dense_adj(data.edge_index).squeeze().tolist() == [ - [1, 1, 1, 1, 0, 0], - [1, 1, 1, 0, 0, 0], - [1, 1, 1, 0, 0, 0], - [1, 0, 0, 1, 1, 1], - [0, 0, 0, 1, 1, 1], - [0, 0, 0, 1, 1, 1], - ] diff --git a/pytorch_geometric-2.3.1/test/loader/test_dataloader.py b/pytorch_geometric-2.3.1/test/loader/test_dataloader.py deleted file mode 100644 index 8bf424f..0000000 --- a/pytorch_geometric-2.3.1/test/loader/test_dataloader.py +++ /dev/null @@ -1,179 +0,0 @@ -import multiprocessing -import sys -from collections import namedtuple - -import pytest -import torch - -from torch_geometric.data import Data, HeteroData -from torch_geometric.loader import DataLoader -from torch_geometric.testing import get_random_edge_index - -with_mp = sys.platform not in ['win32'] -num_workers_list = [0, 2] if with_mp else [0] - -if sys.platform == 'darwin': - multiprocessing.set_start_method('spawn') - - -@pytest.mark.parametrize('num_workers', num_workers_list) -def test_dataloader(num_workers): - x = torch.Tensor([[1], [1], [1]]) - edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) - face = torch.tensor([[0], [1], [2]]) - y = 2. - z = torch.tensor(0.) 
- name = 'data' - - data = Data(x=x, edge_index=edge_index, y=y, z=z, name=name) - assert str(data) == ( - "Data(x=[3, 1], edge_index=[2, 4], y=2.0, z=0.0, name='data')") - data.face = face - - loader = DataLoader([data, data, data, data], batch_size=2, shuffle=False, - num_workers=num_workers) - assert len(loader) == 2 - - for batch in loader: - assert batch.num_graphs == len(batch) == 2 - assert batch.batch.tolist() == [0, 0, 0, 1, 1, 1] - assert batch.ptr.tolist() == [0, 3, 6] - assert batch.x.tolist() == [[1], [1], [1], [1], [1], [1]] - assert batch.edge_index.tolist() == [[0, 1, 1, 2, 3, 4, 4, 5], - [1, 0, 2, 1, 4, 3, 5, 4]] - assert batch.y.tolist() == [2.0, 2.0] - assert batch.z.tolist() == [0.0, 0.0] - assert batch.name == ['data', 'data'] - assert batch.face.tolist() == [[0, 3], [1, 4], [2, 5]] - - for store in batch.stores: - assert id(batch) == id(store._parent()) - - loader = DataLoader([data, data, data, data], batch_size=2, shuffle=False, - follow_batch=['edge_index'], num_workers=num_workers, - collate_fn=None) - assert len(loader) == 2 - - for batch in loader: - assert batch.num_graphs == len(batch) == 2 - assert batch.edge_index_batch.tolist() == [0, 0, 0, 0, 1, 1, 1, 1] - - -def test_dataloader_fallbacks(): - # Test inputs of type List[torch.Tensor]: - data_list = [torch.ones(3) for _ in range(4)] - batch = next(iter(DataLoader(data_list, batch_size=4))) - assert torch.equal(batch, torch.ones(4, 3)) - - # Test inputs of type List[float]: - data_list = [1.0, 1.0, 1.0, 1.0] - batch = next(iter(DataLoader(data_list, batch_size=4))) - assert torch.equal(batch, torch.ones(4)) - - # Test inputs of type List[int]: - data_list = [1, 1, 1, 1] - batch = next(iter(DataLoader(data_list, batch_size=4))) - assert torch.equal(batch, torch.ones(4, dtype=torch.long)) - - # Test inputs of type List[str]: - data_list = ['test'] * 4 - batch = next(iter(DataLoader(data_list, batch_size=4))) - assert batch == data_list - - # Test inputs of type List[Mapping]: - data_list = [{'x': torch.ones(3), 'y': 1}] * 4 - batch = next(iter(DataLoader(data_list, batch_size=4))) - assert torch.equal(batch['x'], torch.ones(4, 3)) - assert torch.equal(batch['y'], torch.ones(4, dtype=torch.long)) - - # Test inputs of type List[Tuple]: - DataTuple = namedtuple('DataTuple', 'x y') - data_list = [DataTuple(0.0, 1)] * 4 - batch = next(iter(DataLoader(data_list, batch_size=4))) - assert torch.equal(batch.x, torch.zeros(4)) - assert torch.equal(batch[1], torch.ones(4, dtype=torch.long)) - - # Test inputs of type List[Sequence]: - data_list = [[0.0, 1]] * 4 - batch = next(iter(DataLoader(data_list, batch_size=4))) - assert torch.equal(batch[0], torch.zeros(4)) - assert torch.equal(batch[1], torch.ones(4, dtype=torch.long)) - - # Test that inputs of unsupported types raise an error: - class DummyClass: - pass - - with pytest.raises(TypeError): - data_list = [DummyClass()] * 4 - next(iter(DataLoader(data_list, batch_size=4))) - - -@pytest.mark.skipif(not with_mp, reason='Multi-processing not available') -def test_multiprocessing(): - queue = torch.multiprocessing.Manager().Queue() - data = Data(x=torch.randn(5, 16)) - data_list = [data, data, data, data] - loader = DataLoader(data_list, batch_size=2) - for batch in loader: - queue.put(batch) - - batch = queue.get() - assert batch.num_graphs == len(batch) == 2 - - batch = queue.get() - assert batch.num_graphs == len(batch) == 2 - - -def test_pin_memory(): - x = torch.randn(3, 16) - edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) - data = Data(x=x, 
edge_index=edge_index)
-
-    loader = DataLoader([data] * 4, batch_size=2, pin_memory=True)
-    for batch in loader:
-        assert batch.x.is_pinned() or not torch.cuda.is_available()
-        assert batch.edge_index.is_pinned() or not torch.cuda.is_available()
-
-
-@pytest.mark.parametrize('num_workers', num_workers_list)
-def test_heterogeneous_dataloader(num_workers):
-    data = HeteroData()
-    data['p'].x = torch.randn(100, 128)
-    data['a'].x = torch.randn(200, 128)
-    data['p', 'a'].edge_index = get_random_edge_index(100, 200, 500)
-    data['p', 'a'].edge_attr = torch.randn(500, 32)
-    data['a', 'p'].edge_index = get_random_edge_index(200, 100, 400)
-    data['a', 'p'].edge_attr = torch.randn(400, 32)
-
-    loader = DataLoader([data, data, data, data], batch_size=2, shuffle=False,
-                        num_workers=num_workers)
-    assert len(loader) == 2
-
-    for batch in loader:
-        assert batch.num_graphs == len(batch) == 2
-        assert batch.num_nodes == 600
-
-        for store in batch.stores:
-            assert id(batch) == id(store._parent())
-
-
-if __name__ == '__main__':
-    import argparse
-    import time
-
-    from torch_geometric.datasets import QM9
-
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--num_workers', type=int, default=0)
-    args = parser.parse_args()
-
-    dataset = QM9('/tmp/QM9')
-    loader = DataLoader(dataset, batch_size=128, shuffle=True,
-                        num_workers=args.num_workers)
-
-    for _ in range(2):
-        print(f'Start loading {len(loader)} mini-batches ... ', end='')
-        t = time.perf_counter()
-        for batch in loader:
-            pass
-        print(f'Done! [{time.perf_counter() - t:.4f}s]')
diff --git a/pytorch_geometric-2.3.1/test/loader/test_dynamic_batch_sampler.py b/pytorch_geometric-2.3.1/test/loader/test_dynamic_batch_sampler.py
deleted file mode 100644
index 5710833..0000000
--- a/pytorch_geometric-2.3.1/test/loader/test_dynamic_batch_sampler.py
+++ /dev/null
@@ -1,45 +0,0 @@
-from typing import List
-
-import pytest
-import torch
-
-from torch_geometric.data import Data
-from torch_geometric.loader import DataLoader, DynamicBatchSampler
-
-
-def test_dataloader_with_dynamic_batches():
-    data_list: List[Data] = []
-    for num_nodes in range(100, 110):
-        data_list.append(Data(num_nodes=num_nodes))
-
-    torch.manual_seed(12345)
-    batch_sampler = DynamicBatchSampler(data_list, 300, shuffle=True)
-    loader = DataLoader(data_list, batch_sampler=batch_sampler)
-
-    num_nodes_total = 0
-    for data in loader:
-        assert data.num_nodes <= 300
-        num_nodes_total += data.num_nodes
-    assert num_nodes_total == 1045
-
-    # Test skipping
-    data_list = [Data(num_nodes=400)] + data_list
-    batch_sampler = DynamicBatchSampler(data_list, 300, skip_too_big=True,
-                                        num_steps=2)
-    loader = DataLoader(data_list, batch_sampler=batch_sampler)
-
-    num_nodes_total = 0
-    for data in loader:
-        num_nodes_total += data.num_nodes
-    assert num_nodes_total == 404
-
-    # Test warning
-    batch_sampler = DynamicBatchSampler(data_list, 300, skip_too_big=False,
-                                        num_steps=2)
-    loader = DataLoader(data_list, batch_sampler=batch_sampler)
-
-    with pytest.warns(UserWarning, match="is larger than 300 nodes"):
-        num_nodes_total = 0
-        for data in loader:
-            num_nodes_total += data.num_nodes
-        assert num_nodes_total == 601
diff --git a/pytorch_geometric-2.3.1/test/loader/test_neighbor_loader.py b/pytorch_geometric-2.3.1/test/loader/test_neighbor_loader.py
deleted file mode 100644
index 00bb86d..0000000
--- a/pytorch_geometric-2.3.1/test/loader/test_neighbor_loader.py
+++ /dev/null
@@ -1,670 +0,0 @@
-import os.path as osp
-import subprocess
-from time import sleep
-
-import numpy as np
-import pytest
-import torch
-from torch_sparse import SparseTensor
-
-import torch_geometric.typing
-from torch_geometric.data import Data, HeteroData
-from torch_geometric.loader import NeighborLoader
-from torch_geometric.nn import GraphConv, to_hetero
-from torch_geometric.testing import (
-    MyFeatureStore,
-    MyGraphStore,
-    get_random_edge_index,
-    onlyLinux,
-    withPackage,
-)
-from torch_geometric.typing import WITH_PYG_LIB
-from torch_geometric.utils import k_hop_subgraph
-
-
-def is_subset(subedge_index, edge_index, src_idx, dst_idx):
-    num_nodes = int(edge_index.max()) + 1
-    idx = num_nodes * edge_index[0] + edge_index[1]
-    subidx = num_nodes * src_idx[subedge_index[0]] + dst_idx[subedge_index[1]]
-    mask = torch.from_numpy(np.isin(subidx, idx))
-    return int(mask.sum()) == mask.numel()
-
-
-@pytest.mark.parametrize('directed', [True])  # TODO re-enable undirected mode
-@pytest.mark.parametrize('dtype', [torch.int64, torch.int32])
-def test_homo_neighbor_loader_basic(directed, dtype):
-    if dtype != torch.int64 and not torch_geometric.typing.WITH_PYG_LIB:
-        return
-
-    torch.manual_seed(12345)
-
-    data = Data()
-
-    data.x = torch.arange(100)
-    data.edge_index = get_random_edge_index(100, 100, 500, dtype)
-    data.edge_attr = torch.arange(500)
-
-    loader = NeighborLoader(data, num_neighbors=[5] * 2, batch_size=20,
-                            directed=directed)
-
-    assert str(loader) == 'NeighborLoader()'
-    assert len(loader) == 5
-
-    for i, batch in enumerate(loader):
-        assert isinstance(batch, Data)
-        assert len(batch) == (9 if WITH_PYG_LIB else 7)
-        assert batch.x.size(0) <= 100
-        assert batch.n_id.size() == (batch.num_nodes, )
-        assert batch.e_id.size() == (batch.num_edges, )
-        assert batch.input_id.numel() == batch.batch_size == 20
-        assert batch.x.min() >= 0 and batch.x.max() < 100
-        assert batch.edge_index.min() >= 0
-        assert batch.edge_index.max() < batch.num_nodes
-        assert batch.edge_attr.min() >= 0
-        assert batch.edge_attr.max() < 500
-
-        # Input nodes are always sampled first:
-        assert torch.equal(
-            batch.x[:batch.batch_size],
-            torch.arange(i * batch.batch_size, (i + 1) * batch.batch_size))
-
-        assert is_subset(
-            batch.edge_index.to(torch.int64),
-            data.edge_index.to(torch.int64),
-            batch.x,
-            batch.x,
-        )
-
-
-@pytest.mark.parametrize('directed', [True])  # TODO re-enable undirected mode
-@pytest.mark.parametrize('dtype', [torch.int64, torch.int32])
-def test_hetero_neighbor_loader_basic(directed, dtype):
-    if dtype != torch.int64 and not torch_geometric.typing.WITH_PYG_LIB:
-        return
-
-    torch.manual_seed(12345)
-
-    data = HeteroData()
-
-    data['paper'].x = torch.arange(100)
-    data['author'].x = torch.arange(100, 300)
-
-    edge_index = get_random_edge_index(100, 100, 500, dtype)
-    data['paper', 'paper'].edge_index = edge_index
-    data['paper', 'paper'].edge_attr = torch.arange(500)
-    edge_index = get_random_edge_index(100, 200, 1000, dtype)
-    data['paper', 'author'].edge_index = edge_index
-    data['paper', 'author'].edge_attr = torch.arange(500, 1500)
-    edge_index = get_random_edge_index(200, 100, 1000, dtype)
-    data['author', 'paper'].edge_index = edge_index
-    data['author', 'paper'].edge_attr = torch.arange(1500, 2500)
-
-    r1, c1 = data['paper', 'paper'].edge_index
-    r2, c2 = data['paper', 'author'].edge_index + torch.tensor([[0], [100]])
-    r3, c3 = data['author', 'paper'].edge_index + torch.tensor([[100], [0]])
-    full_adj = SparseTensor(
-        row=torch.cat([r1, r2, r3]),
-        col=torch.cat([c1, c2, c3]),
-        value=torch.arange(2500),
-    )
-
-    batch_size = 20
-
-    with pytest.raises(ValueError, match="hops must be the same across all"):
-        loader = NeighborLoader(
-            data,
-            num_neighbors={
-                ('paper', 'to', 'paper'): [-1],
-                ('paper', 'to', 'author'): [-1, -1],
-                ('author', 'to', 'paper'): [-1, -1],
-            },
-            input_nodes='paper',
-            batch_size=batch_size,
-            directed=directed,
-        )
-        next(iter(loader))
-
-    loader = NeighborLoader(
-        data,
-        num_neighbors=[10] * 2,
-        input_nodes='paper',
-        batch_size=batch_size,
-        directed=directed,
-    )
-
-    assert str(loader) == 'NeighborLoader()'
-    assert len(loader) == (100 + batch_size - 1) // batch_size
-
-    for batch in loader:
-        assert isinstance(batch, HeteroData)
-
-        # Test node type selection:
-        assert set(batch.node_types) == {'paper', 'author'}
-
-        assert len(batch['paper']) == (5 if WITH_PYG_LIB else 4)
-        assert batch['paper'].n_id.size() == (batch['paper'].num_nodes, )
-        assert batch['paper'].x.size(0) <= 100
-        assert batch['paper'].input_id.numel() == batch_size
-        assert batch['paper'].batch_size == batch_size
-        assert batch['paper'].x.min() >= 0 and batch['paper'].x.max() < 100
-
-        assert len(batch['author']) == (3 if WITH_PYG_LIB else 2)
-        assert batch['author'].n_id.size() == (batch['author'].num_nodes, )
-        assert batch['author'].x.size(0) <= 200
-        assert batch['author'].x.min() >= 100 and batch['author'].x.max() < 300
-
-        # Test edge type selection:
-        assert set(batch.edge_types) == {('paper', 'to', 'paper'),
-                                         ('paper', 'to', 'author'),
-                                         ('author', 'to', 'paper')}
-
-        assert len(batch['paper', 'paper']) == (4 if WITH_PYG_LIB else 3)
-        num_edges = batch['paper', 'paper'].num_edges
-        assert batch['paper', 'paper'].e_id.size() == (num_edges, )
-        row, col = batch['paper', 'paper'].edge_index
-        value = batch['paper', 'paper'].edge_attr
-        assert row.min() >= 0 and row.max() < batch['paper'].num_nodes
-        assert col.min() >= 0 and col.max() < batch['paper'].num_nodes
-        assert value.min() >= 0 and value.max() < 500
-        if not directed:
-            adj = full_adj[batch['paper'].x, batch['paper'].x]
-            assert adj.nnz() == row.size(0)
-            assert torch.allclose(row.unique(), adj.storage.row().unique())
-            assert torch.allclose(col.unique(), adj.storage.col().unique())
-            assert torch.allclose(value.unique(), adj.storage.value().unique())
-
-        assert is_subset(
-            batch['paper', 'paper'].edge_index.to(torch.int64),
-            data['paper', 'paper'].edge_index.to(torch.int64),
-            batch['paper'].x,
-            batch['paper'].x,
-        )
-
-        assert len(batch['paper', 'author']) == (4 if WITH_PYG_LIB else 3)
-        num_edges = batch['paper', 'author'].num_edges
-        assert batch['paper', 'author'].e_id.size() == (num_edges, )
-        row, col = batch['paper', 'author'].edge_index
-        value = batch['paper', 'author'].edge_attr
-        assert row.min() >= 0 and row.max() < batch['paper'].num_nodes
-        assert col.min() >= 0 and col.max() < batch['author'].num_nodes
-        assert value.min() >= 500 and value.max() < 1500
-        if not directed:
-            adj = full_adj[batch['paper'].x, batch['author'].x]
-            assert adj.nnz() == row.size(0)
-            assert torch.allclose(row.unique(), adj.storage.row().unique())
-            assert torch.allclose(col.unique(), adj.storage.col().unique())
-            assert torch.allclose(value.unique(), adj.storage.value().unique())
-
-        assert is_subset(
-            batch['paper', 'author'].edge_index.to(torch.int64),
-            data['paper', 'author'].edge_index.to(torch.int64),
-            batch['paper'].x,
-            batch['author'].x - 100,
-        )
-
-        assert len(batch['author', 'paper']) == (4 if WITH_PYG_LIB else 3)
-        num_edges = batch['author', 'paper'].num_edges
-        assert batch['author', 'paper'].e_id.size() == (num_edges, )
-        row, col = batch['author', 'paper'].edge_index
-        value = batch['author', 'paper'].edge_attr
-
assert row.min() >= 0 and row.max() < batch['author'].num_nodes - assert col.min() >= 0 and col.max() < batch['paper'].num_nodes - assert value.min() >= 1500 and value.max() < 2500 - if not directed: - adj = full_adj[batch['author'].x, batch['paper'].x] - assert adj.nnz() == row.size(0) - assert torch.allclose(row.unique(), adj.storage.row().unique()) - assert torch.allclose(col.unique(), adj.storage.col().unique()) - assert torch.allclose(value.unique(), adj.storage.value().unique()) - - assert is_subset( - batch['author', 'paper'].edge_index.to(torch.int64), - data['author', 'paper'].edge_index.to(torch.int64), - batch['author'].x - 100, - batch['paper'].x, - ) - - # Test for isolated nodes (there shouldn't exist any): - n_id = torch.cat([batch['paper'].x, batch['author'].x]) - row, col, _ = full_adj[n_id, n_id].coo() - assert torch.cat([row, col]).unique().numel() == n_id.numel() - - -@pytest.mark.parametrize('directed', [True]) # TODO re-enable undirected mode -def test_homo_neighbor_loader_on_cora(get_dataset, directed): - dataset = get_dataset(name='Cora') - data = dataset[0] - data.n_id = torch.arange(data.num_nodes) - data.edge_weight = torch.rand(data.num_edges) - - split_idx = torch.arange(5, 8) - - loader = NeighborLoader(data, num_neighbors=[-1, -1], - batch_size=split_idx.numel(), - input_nodes=split_idx, directed=directed) - assert len(loader) == 1 - - batch = next(iter(loader)) - batch_size = batch.batch_size - - if not directed: - n_id, _, _, e_mask = k_hop_subgraph(split_idx, num_hops=2, - edge_index=data.edge_index, - num_nodes=data.num_nodes) - - assert n_id.sort()[0].tolist() == batch.n_id.sort()[0].tolist() - assert batch.num_edges == int(e_mask.sum()) - - class GNN(torch.nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels): - super().__init__() - self.conv1 = GraphConv(in_channels, hidden_channels) - self.conv2 = GraphConv(hidden_channels, out_channels) - - def forward(self, x, edge_index, edge_weight): - x = self.conv1(x, edge_index, edge_weight).relu() - x = self.conv2(x, edge_index, edge_weight).relu() - return x - - model = GNN(dataset.num_features, 16, dataset.num_classes) - - out1 = model(data.x, data.edge_index, data.edge_weight)[split_idx] - out2 = model(batch.x, batch.edge_index, batch.edge_weight)[:batch_size] - assert torch.allclose(out1, out2, atol=1e-6) - - -@pytest.mark.parametrize('directed', [True]) # TODO re-enable undirected mode -def test_hetero_neighbor_loader_on_cora(get_dataset, directed): - dataset = get_dataset(name='Cora') - data = dataset[0] - data.edge_weight = torch.rand(data.num_edges) - - hetero_data = HeteroData() - hetero_data['paper'].x = data.x - hetero_data['paper'].n_id = torch.arange(data.num_nodes) - hetero_data['paper', 'paper'].edge_index = data.edge_index - hetero_data['paper', 'paper'].edge_weight = data.edge_weight - - split_idx = torch.arange(5, 8) - - loader = NeighborLoader(hetero_data, num_neighbors=[-1, -1], - batch_size=split_idx.numel(), - input_nodes=('paper', split_idx), - directed=directed) - assert len(loader) == 1 - - hetero_batch = next(iter(loader)) - batch_size = hetero_batch['paper'].batch_size - - if not directed: - n_id, _, _, e_mask = k_hop_subgraph(split_idx, num_hops=2, - edge_index=data.edge_index, - num_nodes=data.num_nodes) - - n_id = n_id.sort()[0] - assert n_id.tolist() == hetero_batch['paper'].n_id.sort()[0].tolist() - assert hetero_batch['paper', 'paper'].num_edges == int(e_mask.sum()) - - class GNN(torch.nn.Module): - def __init__(self, in_channels, hidden_channels, 
out_channels): - super().__init__() - self.conv1 = GraphConv(in_channels, hidden_channels) - self.conv2 = GraphConv(hidden_channels, out_channels) - - def forward(self, x, edge_index, edge_weight): - x = self.conv1(x, edge_index, edge_weight).relu() - x = self.conv2(x, edge_index, edge_weight).relu() - return x - - model = GNN(dataset.num_features, 16, dataset.num_classes) - hetero_model = to_hetero(model, hetero_data.metadata()) - - out1 = model(data.x, data.edge_index, data.edge_weight)[split_idx] - out2 = hetero_model(hetero_batch.x_dict, hetero_batch.edge_index_dict, - hetero_batch.edge_weight_dict)['paper'][:batch_size] - assert torch.allclose(out1, out2, atol=1e-6) - - -@withPackage('pyg_lib') -def test_temporal_hetero_neighbor_loader_on_cora(get_dataset): - dataset = get_dataset(name='Cora') - data = dataset[0] - - hetero_data = HeteroData() - hetero_data['paper'].x = data.x - hetero_data['paper'].time = torch.arange(data.num_nodes, 0, -1) - hetero_data['paper', 'paper'].edge_index = data.edge_index - - loader = NeighborLoader(hetero_data, num_neighbors=[-1, -1], - input_nodes='paper', time_attr='time', - batch_size=1) - - for batch in loader: - mask = batch['paper'].time[0] >= batch['paper'].time[1:] - assert torch.all(mask) - - -@pytest.mark.parametrize('FeatureStore', [MyFeatureStore, HeteroData]) -@pytest.mark.parametrize('GraphStore', [MyGraphStore, HeteroData]) -def test_custom_neighbor_loader(FeatureStore, GraphStore): - # Initialize feature store, graph store, and reference: - feature_store = FeatureStore() - graph_store = GraphStore() - data = HeteroData() - - # Set up node features: - x = torch.arange(100) - data['paper'].x = x - feature_store.put_tensor(x, group_name='paper', attr_name='x', index=None) - - x = torch.arange(100, 300) - data['author'].x = x - feature_store.put_tensor(x, group_name='author', attr_name='x', index=None) - - # COO: - edge_index = get_random_edge_index(100, 100, 500) - data['paper', 'to', 'paper'].edge_index = edge_index - coo = (edge_index[0], edge_index[1]) - graph_store.put_edge_index(edge_index=coo, - edge_type=('paper', 'to', 'paper'), - layout='coo', size=(100, 100)) - - # CSR: - edge_index = get_random_edge_index(100, 200, 1000) - data['paper', 'to', 'author'].edge_index = edge_index - csr = SparseTensor.from_edge_index(edge_index).csr()[:2] - graph_store.put_edge_index(edge_index=csr, - edge_type=('paper', 'to', 'author'), - layout='csr', size=(100, 200)) - - # CSC: - edge_index = get_random_edge_index(200, 100, 1000) - data['author', 'to', 'paper'].edge_index = edge_index - csc = SparseTensor(row=edge_index[1], col=edge_index[0]).csr()[-2::-1] - graph_store.put_edge_index(edge_index=csc, - edge_type=('author', 'to', 'paper'), - layout='csc', size=(200, 100)) - - # COO (sorted): - edge_index = get_random_edge_index(200, 200, 100) - edge_index = edge_index[:, edge_index[1].argsort()] - data['author', 'to', 'author'].edge_index = edge_index - coo = (edge_index[0], edge_index[1]) - graph_store.put_edge_index(edge_index=coo, - edge_type=('author', 'to', 'author'), - layout='coo', size=(200, 200), is_sorted=True) - - # Construct neighbor loaders: - loader1 = NeighborLoader(data, batch_size=20, - input_nodes=('paper', range(100)), - num_neighbors=[-1] * 2) - - loader2 = NeighborLoader((feature_store, graph_store), batch_size=20, - input_nodes=('paper', range(100)), - num_neighbors=[-1] * 2) - - assert str(loader1) == str(loader2) - assert len(loader1) == len(loader2) - - for batch1, batch2 in zip(loader1, loader2): - # loader2 explicitly adds 
`num_nodes` to the batch
-        assert len(batch1) + 1 == len(batch2)
-        assert batch1['paper'].batch_size == batch2['paper'].batch_size
-
-        # Mapped indices of neighbors may be differently sorted:
-        assert torch.allclose(batch1['paper'].x.sort()[0],
-                              batch2['paper'].x.sort()[0])
-        assert torch.allclose(batch1['author'].x.sort()[0],
-                              batch2['author'].x.sort()[0])
-
-        assert (batch1['paper', 'to', 'paper'].edge_index.size() == batch2[
-            'paper', 'to', 'paper'].edge_index.size())
-        assert (batch1['paper', 'to', 'author'].edge_index.size() == batch2[
-            'paper', 'to', 'author'].edge_index.size())
-        assert (batch1['author', 'to', 'paper'].edge_index.size() == batch2[
-            'author', 'to', 'paper'].edge_index.size())
-
-
-@withPackage('pyg_lib')
-@pytest.mark.parametrize('FeatureStore', [MyFeatureStore, HeteroData])
-@pytest.mark.parametrize('GraphStore', [MyGraphStore, HeteroData])
-def test_temporal_custom_neighbor_loader_on_cora(get_dataset, FeatureStore,
-                                                 GraphStore):
-    # Initialize dataset (once):
-    dataset = get_dataset(name='Cora')
-    data = dataset[0]
-    data.time = torch.arange(data.num_nodes, 0, -1)
-
-    # Initialize feature store, graph store, and reference:
-    feature_store = FeatureStore()
-    graph_store = GraphStore()
-    hetero_data = HeteroData()
-
-    feature_store.put_tensor(
-        data.x,
-        group_name='paper',
-        attr_name='x',
-        index=None,
-    )
-    hetero_data['paper'].x = data.x
-
-    feature_store.put_tensor(
-        data.time,
-        group_name='paper',
-        attr_name='time',
-        index=None,
-    )
-    hetero_data['paper'].time = data.time
-
-    # Sort according to time in local neighborhoods:
-    row, col = data.edge_index
-    perm = ((col * (data.num_nodes + 1)) + data.time[row]).argsort()
-    edge_index = data.edge_index[:, perm]
-
-    graph_store.put_edge_index(
-        edge_index,
-        edge_type=('paper', 'to', 'paper'),
-        layout='coo',
-        is_sorted=True,
-        size=(data.num_nodes, data.num_nodes),
-    )
-    hetero_data['paper', 'to', 'paper'].edge_index = data.edge_index
-
-    loader1 = NeighborLoader(
-        hetero_data,
-        num_neighbors=[-1, -1],
-        input_nodes='paper',
-        time_attr='time',
-        batch_size=128,
-    )
-
-    loader2 = NeighborLoader(
-        (feature_store, graph_store),
-        num_neighbors=[-1, -1],
-        input_nodes='paper',
-        time_attr='time',
-        batch_size=128,
-    )
-
-    for batch1, batch2 in zip(loader1, loader2):
-        assert torch.equal(batch1['paper'].time, batch2['paper'].time)
-
-
-@withPackage('pyg_lib')
-def test_pyg_lib_homo_neighbor_loader():
-    adj = SparseTensor.from_edge_index(get_random_edge_index(20, 20, 100))
-    colptr, row, _ = adj.csc()
-
-    seed = torch.arange(10)
-
-    sample = torch.ops.pyg.neighbor_sample
-    out1 = sample(colptr, row, seed, [-1, -1], None, None, True)
-    sample = torch.ops.torch_sparse.neighbor_sample
-    out2 = sample(colptr, row, seed, [-1, -1], False, True)
-
-    row1, col1, node_id1, edge_id1 = out1[:4]
-    node_id2, row2, col2, edge_id2 = out2
-    assert torch.equal(node_id1, node_id2)
-    assert torch.equal(row1, row2)
-    assert torch.equal(col1, col2)
-    assert torch.equal(edge_id1, edge_id2)
-
-
-@withPackage('pyg_lib')
-def test_pyg_lib_hetero_neighbor_loader():
-    adj1 = SparseTensor.from_edge_index(get_random_edge_index(20, 10, 50))
-    colptr1, row1, _ = adj1.csc()
-
-    adj2 = SparseTensor.from_edge_index(get_random_edge_index(10, 20, 50))
-    colptr2, row2, _ = adj2.csc()
-
-    node_types = ['paper', 'author']
-    edge_types = [('paper', 'to', 'author'), ('author', 'to', 'paper')]
-    colptr_dict = {
-        'paper__to__author': colptr1,
-        'author__to__paper': colptr2,
-    }
-    row_dict = {
-        'paper__to__author': row1,
-        'author__to__paper':
row2, - } - seed_dict = {'paper': torch.arange(1)} - num_neighbors_dict = { - 'paper__to__author': [-1, -1], - 'author__to__paper': [-1, -1], - } - - sample = torch.ops.pyg.hetero_neighbor_sample - out1 = sample(node_types, edge_types, colptr_dict, row_dict, seed_dict, - num_neighbors_dict, None, None, True, False, True, False, - "uniform", True) - sample = torch.ops.torch_sparse.hetero_neighbor_sample - out2 = sample(node_types, edge_types, colptr_dict, row_dict, seed_dict, - num_neighbors_dict, 2, False, True) - - row1_dict, col1_dict, node_id1_dict, edge_id1_dict = out1[:4] - node_id2_dict, row2_dict, col2_dict, edge_id2_dict = out2 - assert len(node_id1_dict) == len(node_id2_dict) - for key in node_id1_dict.keys(): - assert torch.equal(node_id1_dict[key], node_id2_dict[key]) - assert len(row1_dict) == len(row2_dict) - for key in row1_dict.keys(): - assert torch.equal(row1_dict[key], row2_dict[key]) - assert len(col1_dict) == len(col2_dict) - for key in col1_dict.keys(): - assert torch.equal(col1_dict[key], col2_dict[key]) - assert len(edge_id1_dict) == len(edge_id2_dict) - for key in edge_id1_dict.keys(): - assert torch.equal(edge_id1_dict[key], edge_id2_dict[key]) - - -@onlyLinux -def test_memmap_neighbor_loader(tmp_path): - path = osp.join(tmp_path, 'x.npy') - x = np.memmap(path, dtype=np.float32, mode='w+', shape=(100, 32)) - x[:] = np.random.randn(100, 32) - - data = Data() - data.x = np.memmap(path, dtype=np.float32, mode='r', shape=(100, 32)) - data.edge_index = get_random_edge_index(100, 100, 500) - - assert str(data) == 'Data(x=[100, 32], edge_index=[2, 500])' - assert data.num_nodes == 100 - - loader = NeighborLoader(data, num_neighbors=[5] * 2, batch_size=20, - num_workers=2) - batch = next(iter(loader)) - assert batch.num_nodes <= 100 - assert isinstance(batch.x, torch.Tensor) - assert batch.x.size() == (batch.num_nodes, 32) - - -@onlyLinux -@pytest.mark.parametrize('num_workers,loader_cores', [ - (1, None), - (1, [1]), -]) -def test_cpu_affinity_neighbor_loader(num_workers, loader_cores): - data = Data(x=torch.randn(1, 1)) - loader = NeighborLoader(data, num_neighbors=[-1], batch_size=1, - num_workers=num_workers) - - if isinstance(loader_cores, list): - loader_cores = loader_cores[:num_workers] - - out = [] - with loader.enable_cpu_affinity(loader_cores): - iterator = loader._get_iterator().iterator - workers = iterator._workers - for worker in workers: - sleep(1) # Gives time for worker to initialize. 
- process = subprocess.Popen( - ['taskset', '-c', '-p', f'{worker.pid}'], - stdout=subprocess.PIPE) - stdout = process.communicate()[0].decode('utf-8') - out.append(int(stdout.split(':')[1].strip())) - if not loader_cores: - assert out == list(range(0, num_workers)) - else: - assert out == loader_cores - - -@withPackage('pyg_lib') -def test_homo_neighbor_loader_sampled_info(): - edge_index = torch.tensor([ - [2, 3, 4, 5, 7, 7, 10, 11, 12, 13], - [0, 1, 2, 3, 2, 3, 7, 7, 7, 7], - ]) - - data = Data(edge_index=edge_index, num_nodes=14) - - loader = NeighborLoader( - data, - num_neighbors=[1, 2, 4], - batch_size=2, - shuffle=False, - ) - batch = next(iter(loader)) - - assert batch.num_sampled_nodes == [2, 2, 3, 4] - assert batch.num_sampled_edges == [2, 4, 4] - - -@withPackage('pyg_lib') -def test_hetero_neighbor_loader_sampled_info(): - edge_index = torch.tensor([ - [2, 3, 4, 5, 7, 7, 10, 11, 12, 13], - [0, 1, 2, 3, 2, 3, 7, 7, 7, 7], - ]) - - data = HeteroData() - data['paper'].num_nodes = data['author'].num_nodes = 14 - data['paper', 'paper'].edge_index = edge_index - data['paper', 'author'].edge_index = edge_index - data['author', 'paper'].edge_index = edge_index - - loader = NeighborLoader( - data, - num_neighbors=[1, 2, 4], - batch_size=2, - input_nodes='paper', - shuffle=False, - ) - batch = next(iter(loader)) - - expected_num_sampled_nodes = { - 'paper': [2, 2, 3, 4], - 'author': [0, 2, 3, 4], - } - expected_num_sampled_edges = { - ('paper', 'to', 'paper'): [2, 4, 4], - ('paper', 'to', 'author'): [0, 4, 4], - ('author', 'to', 'paper'): [2, 4, 4], - } - - for node_type in batch.node_types: - assert (batch[node_type].num_sampled_nodes == - expected_num_sampled_nodes[node_type]) - for edge_type in batch.edge_types: - assert (batch[edge_type].num_sampled_edges == - expected_num_sampled_edges[edge_type]) diff --git a/pytorch_geometric-2.3.1/test/loader/test_temporal_dataloader.py b/pytorch_geometric-2.3.1/test/loader/test_temporal_dataloader.py deleted file mode 100644 index 82a1da0..0000000 --- a/pytorch_geometric-2.3.1/test/loader/test_temporal_dataloader.py +++ /dev/null @@ -1,22 +0,0 @@ -import torch - -from torch_geometric.data import TemporalData -from torch_geometric.loader import TemporalDataLoader - - -def test_temporal_dataloader(): - src = dst = t = torch.arange(10) - msg = torch.randn(10, 16) - - data = TemporalData(src=src, dst=dst, t=t, msg=msg) - - loader = TemporalDataLoader(data, batch_size=2) - assert len(loader) == 5 - - for i, batch in enumerate(loader): - assert len(batch) == 2 - arange = range(len(batch) * i, len(batch) * i + len(batch)) - assert batch.src.tolist() == data.src[arange].tolist() - assert batch.dst.tolist() == data.dst[arange].tolist() - assert batch.t.tolist() == data.t[arange].tolist() - assert batch.msg.tolist() == data.msg[arange].tolist() diff --git a/pytorch_geometric-2.3.1/test/nn/aggr/test_attention.py b/pytorch_geometric-2.3.1/test/nn/aggr/test_attention.py deleted file mode 100644 index 2bb620d..0000000 --- a/pytorch_geometric-2.3.1/test/nn/aggr/test_attention.py +++ /dev/null @@ -1,21 +0,0 @@ -import torch - -from torch_geometric.nn import MLP -from torch_geometric.nn.aggr import AttentionalAggregation - - -def test_attentional_aggregation(): - channels = 16 - x = torch.randn(6, channels) - index = torch.tensor([0, 0, 1, 1, 1, 2]) - ptr = torch.tensor([0, 2, 5, 6]) - - gate_nn = MLP([channels, 1], act='relu') - nn = MLP([channels, channels], act='relu') - aggr = AttentionalAggregation(gate_nn, nn) - assert str(aggr) == 
(f'AttentionalAggregation(gate_nn=MLP({channels}, 1), '
-                         f'nn=MLP({channels}, {channels}))')
-
-    out = aggr(x, index)
-    assert out.size() == (3, channels)
-    assert torch.allclose(aggr(x, ptr=ptr, dim_size=3), out)
diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_agnn_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_agnn_conv.py
deleted file mode 100644
index 6dad258..0000000
--- a/pytorch_geometric-2.3.1/test/nn/conv/test_agnn_conv.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import pytest
-import torch
-from torch_sparse import SparseTensor
-
-from torch_geometric.nn import AGNNConv
-from torch_geometric.testing import is_full_test
-
-
-@pytest.mark.parametrize('requires_grad', [True, False])
-def test_agnn_conv(requires_grad):
-    x = torch.randn(4, 16)
-    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
-    row, col = edge_index
-    adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4))
-    adj2 = adj1.to_torch_sparse_csc_tensor()
-
-    conv = AGNNConv(requires_grad=requires_grad)
-    assert str(conv) == 'AGNNConv()'
-    out = conv(x, edge_index)
-    assert out.size() == (4, 16)
-    assert torch.allclose(conv(x, adj1.t()), out, atol=1e-6)
-    assert torch.allclose(conv(x, adj2.t()), out, atol=1e-6)
-
-    if is_full_test():
-        t = '(Tensor, Tensor) -> Tensor'
-        jit = torch.jit.script(conv.jittable(t))
-        assert jit(x, edge_index).tolist() == out.tolist()
-
-        t = '(Tensor, SparseTensor) -> Tensor'
-        jit = torch.jit.script(conv.jittable(t))
-        assert torch.allclose(jit(x, adj1.t()), out, atol=1e-6)
diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_antisymmetric_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_antisymmetric_conv.py
deleted file mode 100644
index c48aa02..0000000
--- a/pytorch_geometric-2.3.1/test/nn/conv/test_antisymmetric_conv.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import torch
-from torch_sparse import SparseTensor
-
-from torch_geometric.nn import AntiSymmetricConv
-
-
-def test_antisymmetric_conv():
-    x = torch.randn(4, 8)
-    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
-    row, col = edge_index
-    value = torch.rand(row.size(0))
-    adj2 = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4))
-    adj1 = adj2.set_value(None)
-    adj3 = adj1.to_torch_sparse_csc_tensor()
-    adj4 = adj2.to_torch_sparse_csc_tensor()
-
-    conv = AntiSymmetricConv(8)
-    assert str(conv) == ('AntiSymmetricConv(8, phi=GCNConv(8, 8), '
-                         'num_iters=1, epsilon=0.1, gamma=0.1)')
-
-    out = conv(x, edge_index)
-    assert out.size() == (4, 8)
-    assert torch.allclose(conv(x, adj1.t()), out, atol=1e-6)
-    assert torch.allclose(conv(x, adj3.t()), out, atol=1e-6)
-
-    out = conv(x, edge_index, value)
-    assert out.size() == (4, 8)
-    assert torch.allclose(conv(x, adj2.t()), out, atol=1e-6)
-    assert torch.allclose(conv(x, adj4.t()), out, atol=1e-6)
diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_appnp.py b/pytorch_geometric-2.3.1/test/nn/conv/test_appnp.py
deleted file mode 100644
index 50fed85..0000000
--- a/pytorch_geometric-2.3.1/test/nn/conv/test_appnp.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import torch
-from torch_sparse import SparseTensor
-
-from torch_geometric.nn import APPNP
-from torch_geometric.testing import is_full_test
-
-
-def test_appnp():
-    x = torch.randn(4, 16)
-    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
-    row, col = edge_index
-    adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4))
-    adj2 = adj1.to_torch_sparse_csc_tensor()
-
-    conv = APPNP(K=3, alpha=0.1, cached=True)
-    assert str(conv) == 'APPNP(K=3, alpha=0.1)'
-    out = conv(x, edge_index)
- assert out.size() == (4, 16) - assert torch.allclose(conv(x, adj1.t()), out) - assert torch.allclose(conv(x, adj2.t()), out) - - # Run again to test the cached functionality: - assert conv._cached_edge_index is not None - assert conv._cached_adj_t is not None - assert torch.allclose(conv(x, edge_index), conv(x, adj1.t())) - assert torch.allclose(conv(x, edge_index), conv(x, adj2.t())) - - conv.reset_parameters() - assert conv._cached_edge_index is None - assert conv._cached_adj_t is None - - if is_full_test(): - t = '(Tensor, Tensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, edge_index), out) - - t = '(Tensor, SparseTensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, adj1.t()), out) - - -def test_appnp_dropout(): - x = torch.randn(4, 16) - edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() - - # With dropout probability of 1.0, the final output equals to alpha * x: - conv = APPNP(K=2, alpha=0.1, dropout=1.0) - assert torch.allclose(0.1 * x, conv(x, edge_index)) - assert torch.allclose(0.1 * x, conv(x, adj1.t())) - assert torch.allclose(0.1 * x, conv(x, adj2.t())) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_arma_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_arma_conv.py deleted file mode 100644 index ed7b6bf..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_arma_conv.py +++ /dev/null @@ -1,37 +0,0 @@ -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import ARMAConv -from torch_geometric.testing import is_full_test - - -def test_arma_conv(): - x = torch.randn(4, 16) - edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - row, col = edge_index - adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - - conv = ARMAConv(16, 32, num_stacks=8, num_layers=4) - assert str(conv) == 'ARMAConv(16, 32, num_stacks=8, num_layers=4)' - out = conv(x, edge_index) - assert out.size() == (4, 32) - assert conv(x, adj.t()).tolist() == out.tolist() - - if is_full_test(): - t = '(Tensor, Tensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit(x, edge_index).tolist() == out.tolist() - - t = '(Tensor, SparseTensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, adj.t()), out, atol=1e-6) - - -def test_lazy_arma_conv(): - x = torch.randn(4, 16) - edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - - conv = ARMAConv(-1, 32, num_stacks=8, num_layers=4) - assert str(conv) == 'ARMAConv(-1, 32, num_stacks=8, num_layers=4)' - out = conv(x, edge_index) - assert out.size() == (4, 32) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_cg_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_cg_conv.py deleted file mode 100644 index d0b6795..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_cg_conv.py +++ /dev/null @@ -1,103 +0,0 @@ -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import CGConv -from torch_geometric.testing import is_full_test - - -def test_cg_conv(): - x1 = torch.randn(4, 8) - x2 = torch.randn(2, 16) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() - - conv = CGConv(8) - assert str(conv) == 'CGConv(8, dim=0)' - out = 
conv(x1, edge_index) - assert out.size() == (4, 8) - assert torch.allclose(conv(x1, adj1.t()), out) - assert torch.allclose(conv(x1, adj2.t()), out) - - if is_full_test(): - t = '(Tensor, Tensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, edge_index), out, atol=1e-6) - - t = '(Tensor, SparseTensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, adj1.t()), out, atol=1e-6) - - adj1 = adj1.sparse_resize((4, 2)) - adj2 = adj1.to_torch_sparse_csc_tensor() - conv = CGConv((8, 16)) - assert str(conv) == 'CGConv((8, 16), dim=0)' - out = conv((x1, x2), edge_index) - assert out.size() == (2, 16) - assert torch.allclose(conv((x1, x2), adj1.t()), out, atol=1e-6) - assert torch.allclose(conv((x1, x2), adj2.t()), out, atol=1e-6) - - if is_full_test(): - t = '(PairTensor, Tensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), edge_index), out, atol=1e-6) - - t = '(PairTensor, SparseTensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), adj1.t()), out, atol=1e-6) - - # Test batch_norm true: - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() - conv = CGConv(8, batch_norm=True) - assert str(conv) == 'CGConv(8, dim=0)' - out = conv(x1, edge_index) - assert out.size() == (4, 8) - assert torch.allclose(conv(x1, adj1.t()), out, atol=1e-6) - assert torch.allclose(conv(x1, adj2.t()), out, atol=1e-6) - - if is_full_test(): - t = '(Tensor, Tensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, edge_index), out, atol=1e-6) - - -def test_cg_conv_with_edge_features(): - x1 = torch.randn(4, 8) - x2 = torch.randn(2, 16) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - value = torch.rand(row.size(0), 3) - adj = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) - - conv = CGConv(8, dim=3) - assert str(conv) == 'CGConv(8, dim=3)' - out = conv(x1, edge_index, value) - assert out.size() == (4, 8) - assert conv(x1, adj.t()).tolist() == out.tolist() - - if is_full_test(): - t = '(Tensor, Tensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit(x1, edge_index, value).tolist() == out.tolist() - - t = '(Tensor, SparseTensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit(x1, adj.t()).tolist() == out.tolist() - - adj = adj.sparse_resize((4, 2)) - conv = CGConv((8, 16), dim=3) - assert str(conv) == 'CGConv((8, 16), dim=3)' - out = conv((x1, x2), edge_index, value) - assert out.size() == (2, 16) - assert conv((x1, x2), adj.t()).tolist() == out.tolist() - - if is_full_test(): - t = '(PairTensor, Tensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit((x1, x2), edge_index, value).tolist() == out.tolist() - - t = '(PairTensor, SparseTensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit((x1, x2), adj.t()).tolist() == out.tolist() diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_cheb_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_cheb_conv.py deleted file mode 100644 index 18546fa..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_cheb_conv.py +++ /dev/null @@ -1,45 +0,0 @@ -import torch - -from torch_geometric.nn import ChebConv -from torch_geometric.testing import is_full_test - - -def test_cheb_conv(): - in_channels, out_channels = (16, 
32) - edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - num_nodes = edge_index.max().item() + 1 - edge_weight = torch.rand(edge_index.size(1)) - x = torch.randn((num_nodes, in_channels)) - - conv = ChebConv(in_channels, out_channels, K=3) - assert str(conv) == 'ChebConv(16, 32, K=3, normalization=sym)' - out1 = conv(x, edge_index) - assert out1.size() == (num_nodes, out_channels) - out2 = conv(x, edge_index, edge_weight) - assert out2.size() == (num_nodes, out_channels) - out3 = conv(x, edge_index, edge_weight, lambda_max=3.0) - assert out3.size() == (num_nodes, out_channels) - - if is_full_test(): - jit = torch.jit.script(conv.jittable()) - assert jit(x, edge_index).tolist() == out1.tolist() - assert jit(x, edge_index, edge_weight).tolist() == out2.tolist() - assert jit(x, edge_index, edge_weight, - lambda_max=torch.tensor(3.0)).tolist() == out3.tolist() - - batch = torch.tensor([0, 0, 1, 1]) - edge_index = torch.tensor([[0, 1, 2, 3], [1, 0, 3, 2]]) - num_nodes = edge_index.max().item() + 1 - edge_weight = torch.rand(edge_index.size(1)) - x = torch.randn((num_nodes, in_channels)) - lambda_max = torch.tensor([2.0, 3.0]) - - out4 = conv(x, edge_index, edge_weight, batch) - assert out4.size() == (num_nodes, out_channels) - out5 = conv(x, edge_index, edge_weight, batch, lambda_max) - assert out5.size() == (num_nodes, out_channels) - - if is_full_test(): - assert jit(x, edge_index, edge_weight, batch).tolist() == out4.tolist() - assert jit(x, edge_index, edge_weight, batch, - lambda_max).tolist() == out5.tolist() diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_cluster_gcn_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_cluster_gcn_conv.py deleted file mode 100644 index d7a0a59..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_cluster_gcn_conv.py +++ /dev/null @@ -1,29 +0,0 @@ -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import ClusterGCNConv -from torch_geometric.testing import is_full_test - - -def test_cluster_gcn_conv(): - x = torch.randn(4, 16) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() - - conv = ClusterGCNConv(16, 32, diag_lambda=1.) 
- assert str(conv) == 'ClusterGCNConv(16, 32, diag_lambda=1.0)' - out = conv(x, edge_index) - assert out.size() == (4, 32) - assert torch.allclose(conv(x, adj1.t()), out, atol=1e-5) - assert torch.allclose(conv(x, adj2.t()), out, atol=1e-5) - - if is_full_test(): - t = '(Tensor, Tensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit(x, edge_index).tolist() == out.tolist() - - t = '(Tensor, SparseTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, adj1.t()), out, atol=1e-5) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_dna_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_dna_conv.py deleted file mode 100644 index 4d88b96..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_dna_conv.py +++ /dev/null @@ -1,83 +0,0 @@ -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import DNAConv -from torch_geometric.testing import is_full_test - - -def test_dna_conv1(): - channels = 32 - num_layers = 3 - edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - num_nodes = edge_index.max().item() + 1 - x = torch.randn((num_nodes, num_layers, channels)) - - conv = DNAConv(channels, heads=4, groups=8, dropout=0.0) - assert str(conv) == 'DNAConv(32, heads=4, groups=8)' - out = conv(x, edge_index) - assert out.size() == (num_nodes, channels) - - if is_full_test(): - t = '(Tensor, Tensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit(x, edge_index).tolist() == out.tolist() - - conv = DNAConv(channels, heads=1, groups=1, dropout=0.0) - assert str(conv) == 'DNAConv(32, heads=1, groups=1)' - out = conv(x, edge_index) - assert out.size() == (num_nodes, channels) - - if is_full_test(): - t = '(Tensor, Tensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit(x, edge_index).tolist() == out.tolist() - - conv = DNAConv(channels, heads=1, groups=1, dropout=0.0, cached=True) - out = conv(x, edge_index) - out = conv(x, edge_index) - assert out.size() == (num_nodes, channels) - - if is_full_test(): - t = '(Tensor, Tensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit(x, edge_index).tolist() == out.tolist() - - -def test_dna_conv2(): - x = torch.randn((4, 3, 32)) - edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - row, col = edge_index - value = torch.rand(row.size(0)) - adj2 = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) - adj1 = adj2.set_value(None) - adj3 = adj1.to_torch_sparse_csc_tensor() - adj4 = adj2.to_torch_sparse_csc_tensor() - - conv = DNAConv(32, heads=4, groups=8, dropout=0.0) - assert str(conv) == 'DNAConv(32, heads=4, groups=8)' - out1 = conv(x, edge_index) - assert out1.size() == (4, 32) - assert torch.allclose(conv(x, adj1.t()), out1, atol=1e-6) - assert torch.allclose(conv(x, adj3.t()), out1, atol=1e-6) - out2 = conv(x, edge_index, value) - assert out2.size() == (4, 32) - assert torch.allclose(conv(x, adj2.t()), out2, atol=1e-6) - assert torch.allclose(conv(x, adj4.t()), out2, atol=1e-6) - - if is_full_test(): - t = '(Tensor, Tensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit(x, edge_index).tolist() == out1.tolist() - assert jit(x, edge_index, value).tolist() == out2.tolist() - - t = '(Tensor, SparseTensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, adj1.t()), out1, atol=1e-6) - assert torch.allclose(jit(x, adj2.t()), out2, atol=1e-6) - - conv.cached = True - conv(x, 
edge_index) - assert conv(x, edge_index).tolist() == out1.tolist() - conv(x, adj1.t()) - assert torch.allclose(conv(x, adj1.t()), out1, atol=1e-6) - assert torch.allclose(conv(x, adj3.t()), out1, atol=1e-6) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_edge_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_edge_conv.py deleted file mode 100644 index 0419647..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_edge_conv.py +++ /dev/null @@ -1,102 +0,0 @@ -import torch -from torch.nn import Linear as Lin -from torch.nn import ReLU -from torch.nn import Sequential as Seq -from torch_sparse import SparseTensor - -from torch_geometric.nn import DynamicEdgeConv, EdgeConv -from torch_geometric.testing import is_full_test, withPackage - - -def test_edge_conv(): - x1 = torch.randn(4, 16) - x2 = torch.randn(2, 16) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() - - nn = Seq(Lin(32, 16), ReLU(), Lin(16, 32)) - conv = EdgeConv(nn) - assert str(conv) == ( - 'EdgeConv(nn=Sequential(\n' - ' (0): Linear(in_features=32, out_features=16, bias=True)\n' - ' (1): ReLU()\n' - ' (2): Linear(in_features=16, out_features=32, bias=True)\n' - '))') - out1 = conv(x1, edge_index) - assert out1.size() == (4, 32) - assert conv((x1, x1), edge_index).tolist() == out1.tolist() - assert torch.allclose(conv(x1, adj1.t()), out1, atol=1e-6) - assert torch.allclose(conv(x1, adj2.t()), out1, atol=1e-6) - assert torch.allclose(conv((x1, x1), adj1.t()), out1, atol=1e-6) - assert torch.allclose(conv((x1, x1), adj2.t()), out1, atol=1e-6) - - adj1 = adj1.sparse_resize((4, 2)) - adj2 = adj1.to_torch_sparse_csc_tensor() - out2 = conv((x1, x2), edge_index) - assert out2.size() == (2, 32) - assert torch.allclose(conv((x1, x2), adj1.t()), out2, atol=1e-6) - assert torch.allclose(conv((x1, x2), adj2.t()), out2, atol=1e-6) - - if is_full_test(): - t = '(Tensor, Tensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit(x1, edge_index).tolist() == out1.tolist() - - t = '(PairTensor, Tensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit((x1, x1), edge_index).tolist() == out1.tolist() - assert jit((x1, x2), edge_index).tolist() == out2.tolist() - - adj1 = adj1.sparse_resize((4, 4)) - t = '(Tensor, SparseTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit(x1, adj1.t()).tolist() == out1.tolist() - - t = '(PairTensor, SparseTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit((x1, x1), adj1.t()).tolist() == out1.tolist() - adj1 = adj1.sparse_resize((4, 2)) - assert jit((x1, x2), adj1.t()).tolist() == out2.tolist() - - -@withPackage('torch_cluster') -def test_dynamic_edge_conv(): - x1 = torch.randn(8, 16) - x2 = torch.randn(4, 16) - batch1 = torch.tensor([0, 0, 0, 0, 1, 1, 1, 1]) - batch2 = torch.tensor([0, 0, 1, 1]) - - nn = Seq(Lin(32, 16), ReLU(), Lin(16, 32)) - conv = DynamicEdgeConv(nn, k=2) - assert str(conv) == ( - 'DynamicEdgeConv(nn=Sequential(\n' - ' (0): Linear(in_features=32, out_features=16, bias=True)\n' - ' (1): ReLU()\n' - ' (2): Linear(in_features=16, out_features=32, bias=True)\n' - '), k=2)') - out11 = conv(x1) - assert out11.size() == (8, 32) - - out12 = conv(x1, batch1) - assert out12.size() == (8, 32) - - out21 = conv((x1, x2)) - assert out21.size() == (4, 32) - - out22 = conv((x1, x2), (batch1, batch2)) - assert out22.size() == (4, 32) - - if is_full_test(): - t = '(Tensor, OptTensor) 
-> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit(x1).tolist() == out11.tolist() - assert jit(x1, batch1).tolist() == out12.tolist() - - t = '(PairTensor, Optional[PairTensor]) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit((x1, x2)).tolist() == out21.tolist() - assert jit((x1, x2), (batch1, batch2)).tolist() == out22.tolist() - - torch.jit.script(conv.jittable()) # Test without explicit typing. diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_eg_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_eg_conv.py deleted file mode 100644 index 6d7536b..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_eg_conv.py +++ /dev/null @@ -1,74 +0,0 @@ -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import EGConv -from torch_geometric.testing import is_full_test - - -def test_eg_conv(): - x = torch.randn(4, 16) - edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() - - conv = EGConv(16, 32) - assert str(conv) == "EGConv(16, 32, aggregators=['symnorm'])" - out = conv(x, edge_index) - assert out.size() == (4, 32) - assert torch.allclose(conv(x, adj1.t()), out, atol=1e-6) - assert torch.allclose(conv(x, adj2.t()), out, atol=1e-6) - - conv.cached = True - conv(x, edge_index) - assert conv(x, edge_index).tolist() == out.tolist() - conv(x, adj1.t()) - assert torch.allclose(conv(x, adj1.t()), out, atol=1e-6) - assert torch.allclose(conv(x, adj2.t()), out, atol=1e-6) - - if is_full_test(): - t = '(Tensor, Tensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit(x, edge_index).tolist() == out.tolist() - - t = '(Tensor, SparseTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, adj1.t()), out, atol=1e-6) - - -def test_eg_conv_multiple_aggregators(): - x = torch.randn(4, 16) - edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - - conv = EGConv(16, 32, aggregators=["max", "min"]) - assert str(conv) == "EGConv(16, 32, aggregators=['max', 'min'])" - out = conv(x, edge_index) - assert out.size() == (4, 32) - assert torch.allclose(conv(x, adj1.t()), out, atol=1e-6) - - conv.cached = True - conv(x, edge_index) - assert conv(x, edge_index).tolist() == out.tolist() - conv(x, adj1.t()) - assert torch.allclose(conv(x, adj1.t()), out, atol=1e-6) - - if is_full_test(): - t = '(Tensor, Tensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit(x, edge_index).tolist() == out.tolist() - - t = '(Tensor, SparseTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, adj1.t()), out, atol=1e-6) - - -def test_eg_conv_with_sparse_input_feature(): - x = torch.sparse_coo_tensor(indices=torch.tensor([[0, 0], [0, 1]]), - values=torch.tensor([1., 1.]), - size=torch.Size([4, 16])) - edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - - conv = EGConv(16, 32) - assert conv(x, edge_index).size() == (4, 32) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_fa_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_fa_conv.py deleted file mode 100644 index fa35ad3..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_fa_conv.py +++ /dev/null @@ -1,80 +0,0 @@ -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import FAConv -from torch_geometric.testing import 
is_full_test - - -def test_fa_conv(): - x = torch.randn(4, 16) - x_0 = torch.randn(4, 16) - edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() - - conv = FAConv(16, eps=1.0, cached=True) - assert str(conv) == 'FAConv(16, eps=1.0)' - out = conv(x, x_0, edge_index) - assert out.size() == (4, 16) - assert torch.allclose(conv(x, x_0, adj1.t()), out) - - # Run again to test the cached functionality: - assert conv._cached_edge_index is not None - assert conv._cached_adj_t is not None - assert torch.allclose(conv(x, x_0, edge_index), conv(x, x_0, adj1.t())) - - if is_full_test(): - t = '(Tensor, Tensor, Tensor, OptTensor, NoneType) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, x_0, edge_index), out) - - t = '(Tensor, Tensor, SparseTensor, OptTensor, NoneType) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, x_0, adj1.t()), out) - - conv.reset_parameters() - assert conv._cached_edge_index is None - assert conv._cached_adj_t is None - - # Test without caching: - conv.cached = False - out = conv(x, x_0, edge_index) - assert torch.allclose(conv(x, x_0, adj1.t()), out) - assert torch.allclose(conv(x, x_0, adj2.t()), out) - - # Test `return_attention_weights`. - result = conv(x, x_0, edge_index, return_attention_weights=True) - assert torch.allclose(result[0], out) - assert result[1][0].size() == (2, 10) - assert result[1][1].size() == (10, ) - assert conv._alpha is None - - result = conv(x, x_0, adj1.t(), return_attention_weights=True) - assert torch.allclose(result[0], out) - assert result[1].sizes() == [4, 4] and result[1].nnz() == 10 - assert conv._alpha is None - - result = conv(x, x_0, adj2.t(), return_attention_weights=True) - assert torch.allclose(result[0], out) - assert result[1][0].size() == torch.Size([4, 4]) - assert result[1][0]._nnz() == 10 - assert conv._alpha is None - - if is_full_test(): - t = ('(Tensor, Tensor, Tensor, OptTensor, bool) ' - '-> Tuple[Tensor, Tuple[Tensor, Tensor]]') - jit = torch.jit.script(conv.jittable(t)) - result = jit(x, x_0, edge_index, return_attention_weights=True) - assert torch.allclose(result[0], out) - assert result[1][0].size() == (2, 10) - assert result[1][1].size() == (10, ) - assert conv._alpha is None - - t = ('(Tensor, Tensor, SparseTensor, OptTensor, bool) ' - '-> Tuple[Tensor, SparseTensor]') - jit = torch.jit.script(conv.jittable(t)) - result = jit(x, x_0, adj1.t(), return_attention_weights=True) - assert torch.allclose(result[0], out) - assert result[1].sizes() == [4, 4] and result[1].nnz() == 10 - assert conv._alpha is None diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_fast_hgt_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_fast_hgt_conv.py deleted file mode 100644 index 55d36bf..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_fast_hgt_conv.py +++ /dev/null @@ -1,26 +0,0 @@ -import torch - -from torch_geometric.nn import FastHGTConv -from torch_geometric.testing import get_random_edge_index - - -def test_fast_hgt_conv(): - x_dict = { - 'author': torch.randn(4, 16), - 'paper': torch.randn(6, 16), - } - edge_index = get_random_edge_index(4, 6, num_edges=20) - - edge_index_dict = { - ('author', 'writes', 'paper'): edge_index, - ('paper', 'written_by', 'author'): edge_index.flip([0]), - } - - metadata = (list(x_dict.keys()), list(edge_index_dict.keys())) - - conv = FastHGTConv(16, 16, metadata, heads=2) - 
assert str(conv) == 'FastHGTConv(-1, 16, heads=2)' - out_dict1 = conv(x_dict, edge_index_dict) - assert len(out_dict1) == 2 - assert out_dict1['author'].size() == (4, 16) - assert out_dict1['paper'].size() == (6, 16) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_feast_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_feast_conv.py deleted file mode 100644 index c26b60a..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_feast_conv.py +++ /dev/null @@ -1,47 +0,0 @@ -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import FeaStConv -from torch_geometric.testing import is_full_test - - -def test_feast_conv(): - x1 = torch.randn(4, 16) - x2 = torch.randn(2, 16) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() - - conv = FeaStConv(16, 32, heads=2) - assert str(conv) == 'FeaStConv(16, 32, heads=2)' - - out = conv(x1, edge_index) - assert out.size() == (4, 32) - assert torch.allclose(conv(x1, adj1.t()), out, atol=1e-6) - assert torch.allclose(conv(x1, adj2.t()), out, atol=1e-6) - - if is_full_test(): - t = '(Tensor, Tensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit(x1, edge_index).tolist() == out.tolist() - - t = '(Tensor, SparseTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, adj1.t()), out, atol=1e-6) - - adj1 = adj1.sparse_resize((4, 2)) - adj2 = adj1.to_torch_sparse_csc_tensor() - out = conv((x1, x2), edge_index) - assert out.size() == (2, 32) - assert torch.allclose(conv((x1, x2), adj1.t()), out, atol=1e-6) - assert torch.allclose(conv((x1, x2), adj2.t()), out, atol=1e-6) - - if is_full_test(): - t = '(PairTensor, Tensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit((x1, x2), edge_index).tolist() == out.tolist() - - t = '(PairTensor, SparseTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), adj1.t()), out, atol=1e-6) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_film_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_film_conv.py deleted file mode 100644 index f7943fa..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_film_conv.py +++ /dev/null @@ -1,72 +0,0 @@ -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import FiLMConv -from torch_geometric.testing import is_full_test - - -def test_film_conv(): - x1 = torch.randn(4, 4) - x2 = torch.randn(2, 16) - edge_index = torch.tensor([[0, 1, 1, 2, 2, 3], [0, 0, 1, 0, 1, 1]]) - edge_type = torch.tensor([0, 1, 1, 0, 0, 1]) - row, col = edge_index - adj = SparseTensor(row=row, col=col, value=edge_type, sparse_sizes=(4, 4)) - - conv = FiLMConv(4, 32) - assert str(conv) == 'FiLMConv(4, 32, num_relations=1)' - out1 = conv(x1, edge_index) - assert out1.size() == (4, 32) - assert conv(x1, adj.t().set_value(None)).tolist() == out1.tolist() - - if is_full_test(): - t = '(Tensor, Tensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit(x1, edge_index).tolist() == out1.tolist() - t = '(Tensor, SparseTensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit(x1, adj.t().set_value(None)).tolist() == out1.tolist() - - conv = FiLMConv(4, 32, num_relations=2) - assert str(conv) == 'FiLMConv(4, 32, num_relations=2)' - out1 = conv(x1, edge_index, edge_type) - assert out1.size() == (4, 32) - assert conv(x1, adj.t()).tolist() == out1.tolist() - - if 
is_full_test(): - t = '(Tensor, Tensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit(x1, edge_index, edge_type).tolist() == out1.tolist() - t = '(Tensor, SparseTensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit(x1, adj.t()).tolist() == out1.tolist() - - adj = adj.sparse_resize((4, 2)) - - conv = FiLMConv((4, 16), 32) - assert str(conv) == 'FiLMConv((4, 16), 32, num_relations=1)' - out1 = conv((x1, x2), edge_index) - assert out1.size() == (2, 32) - assert conv((x1, x2), adj.t().set_value(None)).tolist() == out1.tolist() - - if is_full_test(): - t = '(PairTensor, Tensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit((x1, x2), edge_index).tolist() == out1.tolist() - t = '(PairTensor, SparseTensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit((x1, x2), adj.t().set_value(None)).tolist() == out1.tolist() - - conv = FiLMConv((4, 16), 32, num_relations=2) - assert str(conv) == 'FiLMConv((4, 16), 32, num_relations=2)' - out1 = conv((x1, x2), edge_index, edge_type) - assert out1.size() == (2, 32) - assert conv((x1, x2), adj.t()).tolist() == out1.tolist() - - if is_full_test(): - t = '(PairTensor, Tensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit((x1, x2), edge_index, edge_type).tolist() == out1.tolist() - t = '(PairTensor, SparseTensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit((x1, x2), adj.t()).tolist() == out1.tolist() diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_gat_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_gat_conv.py deleted file mode 100644 index 126659c..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_gat_conv.py +++ /dev/null @@ -1,137 +0,0 @@ -import pytest -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import GATConv -from torch_geometric.testing import is_full_test, withCUDA - - -def test_gat_conv(): - x1 = torch.randn(4, 8) - x2 = torch.randn(2, 16) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() - - conv = GATConv(8, 32, heads=2) - assert str(conv) == 'GATConv(8, 32, heads=2)' - out = conv(x1, edge_index) - assert out.size() == (4, 64) - assert torch.allclose(conv(x1, edge_index, size=(4, 4)), out) - assert torch.allclose(conv(x1, adj1.t()), out, atol=1e-6) - assert torch.allclose(conv(x1, adj2.t()), out, atol=1e-6) - - if is_full_test(): - t = '(Tensor, Tensor, OptTensor, Size, NoneType) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, edge_index), out) - assert torch.allclose(jit(x1, edge_index, size=(4, 4)), out) - - t = '(Tensor, SparseTensor, OptTensor, Size, NoneType) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, adj1.t()), out, atol=1e-6) - - # Test `return_attention_weights`. 
- result = conv(x1, edge_index, return_attention_weights=True) - assert torch.allclose(result[0], out) - assert result[1][0].size() == (2, 7) - assert result[1][1].size() == (7, 2) - assert result[1][1].min() >= 0 and result[1][1].max() <= 1 - - result = conv(x1, adj1.t(), return_attention_weights=True) - assert torch.allclose(result[0], out, atol=1e-6) - assert result[1].sizes() == [4, 4, 2] and result[1].nnz() == 7 - - result = conv(x1, adj2.t(), return_attention_weights=True) - assert torch.allclose(result[0], out, atol=1e-6) - assert result[1][0].size() == torch.Size([4, 4, 2]) - assert result[1][0]._nnz() == 7 - - if is_full_test(): - t = ('(Tensor, Tensor, OptTensor, Size, bool) -> ' - 'Tuple[Tensor, Tuple[Tensor, Tensor]]') - jit = torch.jit.script(conv.jittable(t)) - result = jit(x1, edge_index, return_attention_weights=True) - assert torch.allclose(result[0], out) - assert result[1][0].size() == (2, 7) - assert result[1][1].size() == (7, 2) - assert result[1][1].min() >= 0 and result[1][1].max() <= 1 - - t = ('(Tensor, SparseTensor, OptTensor, Size, bool) -> ' - 'Tuple[Tensor, SparseTensor]') - jit = torch.jit.script(conv.jittable(t)) - result = jit(x1, adj1.t(), return_attention_weights=True) - assert torch.allclose(result[0], out, atol=1e-6) - assert result[1].sizes() == [4, 4, 2] and result[1].nnz() == 7 - - adj1 = adj1.sparse_resize((4, 2)) - adj2 = adj1.to_torch_sparse_csc_tensor() - conv = GATConv((8, 16), 32, heads=2) - assert str(conv) == 'GATConv((8, 16), 32, heads=2)' - - out1 = conv((x1, x2), edge_index) - out2 = conv((x1, None), edge_index, size=(4, 2)) - assert out1.size() == (2, 64) - assert out2.size() == (2, 64) - assert torch.allclose(conv((x1, x2), edge_index, size=(4, 2)), out1) - assert torch.allclose(conv((x1, x2), adj1.t()), out1, atol=1e-6) - assert torch.allclose(conv((x1, x2), adj2.t()), out1, atol=1e-6) - assert torch.allclose(conv((x1, None), adj1.t()), out2, atol=1e-6) - assert torch.allclose(conv((x1, None), adj2.t()), out2, atol=1e-6) - - if is_full_test(): - t = '(OptPairTensor, Tensor, OptTensor, Size, NoneType) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), edge_index), out1) - assert torch.allclose(jit((x1, x2), edge_index, size=(4, 2)), out1) - assert torch.allclose(jit((x1, None), edge_index, size=(4, 2)), out2) - - t = ('(OptPairTensor, SparseTensor, OptTensor, Size, NoneType) -> ' - 'Tensor') - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), adj1.t()), out1, atol=1e-6) - assert torch.allclose(jit((x1, None), adj1.t()), out2, atol=1e-6) - - -def test_gat_conv_with_edge_attr(): - x = torch.randn(4, 8) - edge_index = torch.tensor([[0, 1, 2, 3], [1, 0, 1, 1]]) - edge_weight = torch.randn(edge_index.size(1)) - edge_attr = torch.randn(edge_index.size(1), 4) - adj1 = SparseTensor.from_edge_index(edge_index, edge_weight) - adj2 = SparseTensor.from_edge_index(edge_index, edge_attr) - - conv = GATConv(8, 32, heads=2, edge_dim=1, fill_value=0.5) - out = conv(x, edge_index, edge_weight) - assert out.size() == (4, 64) - with pytest.raises(NotImplementedError): - assert torch.allclose(conv(x, adj1.t()), out) - - conv = GATConv(8, 32, heads=2, edge_dim=1, fill_value='mean') - out = conv(x, edge_index, edge_weight) - assert out.size() == (4, 64) - with pytest.raises(NotImplementedError): - assert torch.allclose(conv(x, adj1.t()), out) - - conv = GATConv(8, 32, heads=2, edge_dim=4, fill_value=0.5) - out = conv(x, edge_index, edge_attr) - assert out.size() == (4, 64) - with 
pytest.raises(NotImplementedError): - assert torch.allclose(conv(x, adj2.t()), out) - - conv = GATConv(8, 32, heads=2, edge_dim=4, fill_value='mean') - out = conv(x, edge_index, edge_attr) - assert out.size() == (4, 64) - with pytest.raises(NotImplementedError): - assert torch.allclose(conv(x, adj2.t()), out) - - -@withCUDA -def test_gat_conv_empty_edge_index(device): - x = torch.randn(0, 8, device=device) - edge_index = torch.empty(2, 0, dtype=torch.long, device=device) - - conv = GATConv(8, 32, heads=2).to(device) - out = conv(x, edge_index) - assert out.size() == (0, 64) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_gated_graph_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_gated_graph_conv.py deleted file mode 100644 index da96814..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_gated_graph_conv.py +++ /dev/null @@ -1,38 +0,0 @@ -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import GatedGraphConv -from torch_geometric.testing import is_full_test - - -def test_gated_graph_conv(): - x = torch.randn(4, 16) - edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - row, col = edge_index - value = torch.rand(row.size(0)) - adj2 = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) - adj1 = adj2.set_value(None) - adj3 = adj1.to_torch_sparse_csc_tensor() - adj4 = adj2.to_torch_sparse_csc_tensor() - - conv = GatedGraphConv(32, num_layers=3) - assert str(conv) == 'GatedGraphConv(32, num_layers=3)' - out1 = conv(x, edge_index) - assert out1.size() == (4, 32) - assert torch.allclose(conv(x, adj1.t()), out1, atol=1e-6) - assert torch.allclose(conv(x, adj3.t()), out1, atol=1e-6) - out2 = conv(x, edge_index, value) - assert out2.size() == (4, 32) - assert torch.allclose(conv(x, adj2.t()), out2, atol=1e-6) - assert torch.allclose(conv(x, adj4.t()), out2, atol=1e-6) - - if is_full_test(): - t = '(Tensor, Tensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit(x, edge_index).tolist() == out1.tolist() - assert jit(x, edge_index, value).tolist() == out2.tolist() - - t = '(Tensor, SparseTensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, adj1.t()), out1, atol=1e-6) - assert torch.allclose(jit(x, adj2.t()), out2, atol=1e-6) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_gatv2_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_gatv2_conv.py deleted file mode 100644 index 19328c3..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_gatv2_conv.py +++ /dev/null @@ -1,108 +0,0 @@ -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import GATv2Conv -from torch_geometric.testing import is_full_test - - -def test_gatv2_conv(): - x1 = torch.randn(4, 8) - x2 = torch.randn(2, 8) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() - - conv = GATv2Conv(8, 32, heads=2) - assert str(conv) == 'GATv2Conv(8, 32, heads=2)' - out = conv(x1, edge_index) - assert out.size() == (4, 64) - assert torch.allclose(conv(x1, edge_index), out) - assert torch.allclose(conv(x1, adj1.t()), out, atol=1e-6) - assert torch.allclose(conv(x1, adj2.t()), out, atol=1e-6) - - if is_full_test(): - t = '(Tensor, Tensor, OptTensor, NoneType) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, edge_index), out) - - t = '(Tensor, SparseTensor, OptTensor, NoneType) -> Tensor' - jit = 
torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, adj1.t()), out, atol=1e-6) - - # Test `return_attention_weights`. - result = conv(x1, edge_index, return_attention_weights=True) - assert torch.allclose(result[0], out) - assert result[1][0].size() == (2, 7) - assert result[1][1].size() == (7, 2) - assert result[1][1].min() >= 0 and result[1][1].max() <= 1 - assert conv._alpha is None - - result = conv(x1, adj1.t(), return_attention_weights=True) - assert torch.allclose(result[0], out, atol=1e-6) - assert result[1].sizes() == [4, 4, 2] and result[1].nnz() == 7 - assert conv._alpha is None - - result = conv(x1, adj2.t(), return_attention_weights=True) - assert torch.allclose(result[0], out, atol=1e-6) - assert result[1][0].size() == torch.Size([4, 4, 2]) - assert result[1][0]._nnz() == 7 - - if is_full_test(): - t = ('(Tensor, Tensor, OptTensor, bool) -> ' - 'Tuple[Tensor, Tuple[Tensor, Tensor]]') - jit = torch.jit.script(conv.jittable(t)) - result = jit(x1, edge_index, return_attention_weights=True) - assert torch.allclose(result[0], out) - assert result[1][0].size() == (2, 7) - assert result[1][1].size() == (7, 2) - assert result[1][1].min() >= 0 and result[1][1].max() <= 1 - assert conv._alpha is None - - t = ('(Tensor, SparseTensor, OptTensor, bool) -> ' - 'Tuple[Tensor, SparseTensor]') - jit = torch.jit.script(conv.jittable(t)) - result = jit(x1, adj1.t(), return_attention_weights=True) - assert torch.allclose(result[0], out, atol=1e-6) - assert result[1].sizes() == [4, 4, 2] and result[1].nnz() == 7 - assert conv._alpha is None - - adj1 = adj1.sparse_resize((4, 2)) - adj2 = adj1.to_torch_sparse_csc_tensor() - out1 = conv((x1, x2), edge_index) - assert out1.size() == (2, 64) - assert torch.allclose(conv((x1, x2), edge_index), out1) - assert torch.allclose(conv((x1, x2), adj1.t()), out1, atol=1e-6) - assert torch.allclose(conv((x1, x2), adj2.t()), out1, atol=1e-6) - - if is_full_test(): - t = '(OptPairTensor, Tensor, OptTensor, NoneType) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), edge_index), out1) - - t = '(OptPairTensor, SparseTensor, OptTensor, NoneType) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), adj1.t()), out1, atol=1e-6) - - -def test_gatv2_conv_with_edge_attr(): - x = torch.randn(4, 8) - edge_index = torch.tensor([[0, 1, 2, 3], [1, 0, 1, 1]]) - edge_weight = torch.randn(edge_index.size(1)) - edge_attr = torch.randn(edge_index.size(1), 4) - - conv = GATv2Conv(8, 32, heads=2, edge_dim=1, fill_value=0.5) - out = conv(x, edge_index, edge_weight) - assert out.size() == (4, 64) - - conv = GATv2Conv(8, 32, heads=2, edge_dim=1, fill_value='mean') - out = conv(x, edge_index, edge_weight) - assert out.size() == (4, 64) - - conv = GATv2Conv(8, 32, heads=2, edge_dim=4, fill_value=0.5) - out = conv(x, edge_index, edge_attr) - assert out.size() == (4, 64) - - conv = GATv2Conv(8, 32, heads=2, edge_dim=4, fill_value='mean') - out = conv(x, edge_index, edge_attr) - assert out.size() == (4, 64) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_gcn2_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_gcn2_conv.py deleted file mode 100644 index f570b41..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_gcn2_conv.py +++ /dev/null @@ -1,48 +0,0 @@ -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import GCN2Conv -from torch_geometric.testing import is_full_test - - -def test_gcn2_conv(): - x = torch.randn(4, 16) - x_0 = torch.randn(4, 16) - 
edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - row, col = edge_index - value = torch.rand(row.size(0)) - adj2 = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) - adj1 = adj2.set_value(None) - adj3 = adj1.to_torch_sparse_csc_tensor() - adj4 = adj2.to_torch_sparse_csc_tensor() - - conv = GCN2Conv(16, alpha=0.2) - assert str(conv) == 'GCN2Conv(16, alpha=0.2, beta=1.0)' - out1 = conv(x, x_0, edge_index) - assert out1.size() == (4, 16) - assert torch.allclose(conv(x, x_0, adj1.t()), out1, atol=1e-6) - assert torch.allclose(conv(x, x_0, adj3.t()), out1, atol=1e-6) - out2 = conv(x, x_0, edge_index, value) - assert out2.size() == (4, 16) - assert torch.allclose(conv(x, x_0, adj2.t()), out2, atol=1e-6) - assert torch.allclose(conv(x, x_0, adj4.t()), out2, atol=1e-6) - - if is_full_test(): - t = '(Tensor, Tensor, Tensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit(x, x_0, edge_index).tolist() == out1.tolist() - assert jit(x, x_0, edge_index, value).tolist() == out2.tolist() - - t = '(Tensor, Tensor, SparseTensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, x_0, adj1.t()), out1, atol=1e-6) - assert torch.allclose(jit(x, x_0, adj2.t()), out2, atol=1e-6) - - conv.cached = True - conv(x, x_0, edge_index) - assert torch.allclose(conv(x, x_0, edge_index), out1, atol=1e-6) - conv._cached_edge_index = None - conv(x, x_0, adj3.t()) - assert torch.allclose(conv(x, x_0, adj3.t()), out1, atol=1e-6) - conv(x, x_0, adj1.t()) - assert torch.allclose(conv(x, x_0, adj1.t()), out1, atol=1e-6) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_gcn_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_gcn_conv.py deleted file mode 100644 index 4ac0261..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_gcn_conv.py +++ /dev/null @@ -1,117 +0,0 @@ -import copy - -import pytest -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import GCNConv -from torch_geometric.nn.conv.gcn_conv import gcn_norm -from torch_geometric.testing import is_full_test -from torch_geometric.utils import to_torch_coo_tensor - - -def test_gcn_conv(): - x = torch.randn(4, 16) - edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - row, col = edge_index - value = torch.rand(row.size(0)) - adj2 = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) - adj1 = adj2.set_value(None) - adj3 = adj1.to_torch_sparse_csc_tensor() - adj4 = adj2.to_torch_sparse_csc_tensor() - - conv = GCNConv(16, 32) - assert str(conv) == 'GCNConv(16, 32)' - out1 = conv(x, edge_index) - assert out1.size() == (4, 32) - assert torch.allclose(conv(x, adj1.t()), out1, atol=1e-6) - assert torch.allclose(conv(x, adj3.t()), out1, atol=1e-6) - out2 = conv(x, edge_index, value) - assert out2.size() == (4, 32) - assert torch.allclose(conv(x, adj2.t()), out2, atol=1e-6) - assert torch.allclose(conv(x, adj4.t()), out2, atol=1e-6) - - if is_full_test(): - t = '(Tensor, Tensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit(x, edge_index).tolist() == out1.tolist() - assert jit(x, edge_index, value).tolist() == out2.tolist() - - t = '(Tensor, SparseTensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, adj1.t()), out1, atol=1e-6) - assert torch.allclose(jit(x, adj2.t()), out2, atol=1e-6) - - conv.cached = True - conv(x, edge_index) - assert conv(x, edge_index).tolist() == out1.tolist() - conv(x, adj1.t()) - assert 
torch.allclose(conv(x, adj1.t()), out1, atol=1e-6) - - -def test_gcn_conv_with_decomposed_layers(): - x = torch.randn(4, 16) - edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - - conv = GCNConv(16, 32) - - decomposed_conv = copy.deepcopy(conv) - decomposed_conv.decomposed_layers = 2 - - out1 = conv(x, edge_index) - out2 = decomposed_conv(x, edge_index) - assert torch.allclose(out1, out2) - - if is_full_test(): - t = '(Tensor, Tensor, OptTensor) -> Tensor' - jit = torch.jit.script(decomposed_conv.jittable(t)) - assert jit(x, edge_index).tolist() == out1.tolist() - - -def test_gcn_conv_with_sparse_input_feature(): - x = torch.sparse_coo_tensor( - indices=torch.tensor([[0, 0], [0, 1]]), - values=torch.tensor([1., 1.]), - size=torch.Size([4, 16]), - ) - edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - - conv = GCNConv(16, 32) - assert conv(x, edge_index).size() == (4, 32) - - -def test_static_gcn_conv(): - x = torch.randn(3, 4, 16) - edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - - conv = GCNConv(16, 32) - out = conv(x, edge_index) - assert out.size() == (3, 4, 32) - - -def test_gcn_conv_norm(): - x = torch.randn(4, 16) - edge_index = torch.tensor([[0, 0, 0], [1, 2, 3]]) - row, col = edge_index - - conv = GCNConv(16, 32, flow="source_to_target") - out1 = conv(x, edge_index) - conv.flow = "target_to_source" - out2 = conv(x, edge_index.flip(0)) - assert torch.allclose(out1, out2, atol=1e-6) - - -@pytest.mark.parametrize('requires_grad', [False, True]) -@pytest.mark.parametrize('layout', [torch.sparse_coo, torch.sparse_csr]) -def test_gcn_norm_gradient(requires_grad, layout): - edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - edge_weight = torch.ones(edge_index.size(1), requires_grad=requires_grad) - adj = to_torch_coo_tensor(edge_index, edge_weight) - if layout == torch.sparse_csr: - adj = adj.to_sparse_csr() - - # TODO Sparse CSR tensor does not yet inherit `requires_grad` from `value`. 
- if layout == torch.sparse_csr: - assert not gcn_norm(adj)[0].requires_grad - else: - assert adj.requires_grad == gcn_norm(adj)[0].requires_grad diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_gen_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_gen_conv.py deleted file mode 100644 index a66de73..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_gen_conv.py +++ /dev/null @@ -1,138 +0,0 @@ -import pytest -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import GENConv -from torch_geometric.testing import is_full_test - - -@pytest.mark.parametrize('aggr', [ - 'softmax', - 'powermean', - ['softmax', 'powermean'], -]) -def test_gen_conv(aggr): - x1 = torch.randn(4, 16) - x2 = torch.randn(2, 16) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - value = torch.randn(row.size(0), 16) - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) - adj3 = adj1.to_torch_sparse_coo_tensor() - adj4 = adj2.to_torch_sparse_coo_tensor() - - conv = GENConv(16, 32, aggr, edge_dim=16, msg_norm=True) - assert str(conv) == f'GENConv(16, 32, aggr={aggr})' - out11 = conv(x1, edge_index) - assert out11.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, size=(4, 4)), out11) - assert torch.allclose(conv(x1, adj1.t()), out11) - assert torch.allclose(conv(x1, adj3.t().coalesce()), out11) - - out12 = conv(x1, edge_index, value) - assert out12.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, value, (4, 4)), out12) - assert torch.allclose(conv(x1, adj2.t()), out12) - # t() expects a tensor with <= 2 sparse and 0 dense dimensions - assert torch.allclose(conv(x1, adj4.transpose(1, 0).coalesce()), out12) - - if is_full_test(): - t = '(Tensor, Tensor, OptTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, edge_index), out11, atol=1e-6) - assert torch.allclose(jit(x1, edge_index, size=(4, 4)), out11, - atol=1e-6) - assert torch.allclose(jit(x1, edge_index, value), out12, atol=1e-6) - assert torch.allclose(jit(x1, edge_index, value, size=(4, 4)), out12, - atol=1e-6) - - t = '(Tensor, SparseTensor, OptTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, adj1.t()), out11) - assert torch.allclose(jit(x1, adj2.t()), out12) - - adj1 = adj1.sparse_resize((4, 2)) - adj2 = adj2.sparse_resize((4, 2)) - adj3 = adj1.to_torch_sparse_coo_tensor() - adj4 = adj2.to_torch_sparse_coo_tensor() - - out21 = conv((x1, x2), edge_index) - assert out21.size() == (2, 32) - assert torch.allclose(conv((x1, x2), edge_index, size=(4, 2)), out21) - assert torch.allclose(conv((x1, x2), adj1.t()), out21) - assert torch.allclose(conv((x1, x2), adj3.t().coalesce()), out21) - - out22 = conv((x1, x2), edge_index, value) - assert out22.size() == (2, 32) - assert torch.allclose(conv((x1, x2), edge_index, value, (4, 2)), out22) - assert torch.allclose(conv((x1, x2), adj2.t()), out22) - assert torch.allclose(conv((x1, x2), - adj4.transpose(1, 0).coalesce()), out22) - - if is_full_test(): - t = '(OptPairTensor, Tensor, OptTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), edge_index), out21, atol=1e-6) - assert torch.allclose(jit((x1, x2), edge_index, size=(4, 2)), out21, - atol=1e-6) - assert torch.allclose(jit((x1, x2), edge_index, value), out22, - atol=1e-6) - assert torch.allclose(jit((x1, x2), edge_index, value, (4, 2)), out22, - 
atol=1e-6) - - t = '(OptPairTensor, SparseTensor, OptTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), adj1.t()), out21) - assert torch.allclose(jit((x1, x2), adj2.t()), out22) - - conv.reset_parameters() - assert float(conv.msg_norm.scale) == 1 - - x1 = torch.randn(4, 8) - x2 = torch.randn(2, 16) - adj1 = adj1.sparse_resize((4, 2)) - adj2 = adj1.to_torch_sparse_coo_tensor() - conv = GENConv((8, 16), 32, aggr) - assert str(conv) == f'GENConv((8, 16), 32, aggr={aggr})' - out1 = conv((x1, x2), edge_index) - out2 = conv((x1, None), edge_index, size=(4, 2)) - assert out1.size() == (2, 32) - assert out2.size() == (2, 32) - assert torch.allclose(conv((x1, x2), edge_index, size=(4, 2)), out1) - assert torch.allclose(conv((x1, x2), adj1.t()), out1) - assert torch.allclose(conv((x1, x2), adj2.t().coalesce()), out1) - assert torch.allclose(conv((x1, None), adj1.t()), out2) - assert torch.allclose(conv((x1, None), adj2.t().coalesce()), out2) - - value = torch.randn(row.size(0), 4) - adj1 = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 2)) - adj2 = adj1.to_torch_sparse_coo_tensor() - conv = GENConv((-1, -1), 32, aggr, edge_dim=-1) - assert str(conv) == f'GENConv((-1, -1), 32, aggr={aggr})' - out1 = conv((x1, x2), edge_index, value) - out2 = conv((x1, None), edge_index, value, size=(4, 2)) - assert out1.size() == (2, 32) - assert out2.size() == (2, 32) - assert torch.allclose(conv((x1, x2), edge_index, value, size=(4, 2)), out1) - assert torch.allclose(conv((x1, x2), adj1.t()), out1) - assert torch.allclose(conv((x1, x2), - adj2.transpose(1, 0).coalesce()), out1) - assert torch.allclose(conv((x1, None), adj1.t()), out2) - assert torch.allclose(conv((x1, None), - adj2.transpose(1, 0).coalesce()), out2) - - if is_full_test(): - t = '(OptPairTensor, Tensor, OptTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), edge_index, value), out1, - atol=1e-6) - assert torch.allclose(jit((x1, x2), edge_index, value, size=(4, 2)), - out1, atol=1e-6) - assert torch.allclose(jit((x1, None), edge_index, value, size=(4, 2)), - out2, atol=1e-6) - - t = '(OptPairTensor, SparseTensor, OptTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), adj1.t()), out1, atol=1e-6) - assert torch.allclose(jit((x1, None), adj1.t()), out2, atol=1e-6) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_general_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_general_conv.py deleted file mode 100644 index c0c7910..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_general_conv.py +++ /dev/null @@ -1,58 +0,0 @@ -import torch - -from torch_geometric.nn import GeneralConv - - -def test_general_conv(): - x1 = torch.randn(4, 8) - e1 = torch.randn(4, 16) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - - conv = GeneralConv(8, 32, 16) - assert str(conv) == 'GeneralConv(8, 32)' - out = conv(x1, edge_index, edge_attr=e1) - assert out.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, edge_attr=e1), out, atol=1e-7) - - conv = GeneralConv(8, 32, 16, skip_linear=True) - assert str(conv) == 'GeneralConv(8, 32)' - out = conv(x1, edge_index, edge_attr=e1) - assert out.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, edge_attr=e1), out, atol=1e-7) - - conv = GeneralConv(8, 32, 16, directed_msg=False) - assert str(conv) == 'GeneralConv(8, 32)' - out = conv(x1, edge_index, edge_attr=e1) - assert out.size() == (4, 32) - assert 
torch.allclose(conv(x1, edge_index, edge_attr=e1), out, atol=1e-7) - - conv = GeneralConv(8, 32, 16, heads=3) - assert str(conv) == 'GeneralConv(8, 32)' - out = conv(x1, edge_index, edge_attr=e1) - assert out.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, edge_attr=e1), out, atol=1e-7) - - conv = GeneralConv(8, 32, 16, attention=True) - assert str(conv) == 'GeneralConv(8, 32)' - out = conv(x1, edge_index, edge_attr=e1) - assert out.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, edge_attr=e1), out, atol=1e-7) - - conv = GeneralConv(8, 32, 16, heads=3, attention=True) - assert str(conv) == 'GeneralConv(8, 32)' - out = conv(x1, edge_index, edge_attr=e1) - assert out.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, edge_attr=e1), out, atol=1e-7) - - conv = GeneralConv(8, 32, 16, heads=3, attention=True, - attention_type='dot_product') - assert str(conv) == 'GeneralConv(8, 32)' - out = conv(x1, edge_index, edge_attr=e1) - assert out.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, edge_attr=e1), out, atol=1e-7) - - conv = GeneralConv(8, 32, 16, l2_normalize=True) - assert str(conv) == 'GeneralConv(8, 32)' - out = conv(x1, edge_index, edge_attr=e1) - assert out.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, edge_attr=e1), out, atol=1e-7) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_gin_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_gin_conv.py deleted file mode 100644 index 69a7c68..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_gin_conv.py +++ /dev/null @@ -1,158 +0,0 @@ -import torch -from torch.nn import Linear as Lin -from torch.nn import ReLU -from torch.nn import Sequential as Seq -from torch_sparse import SparseTensor - -from torch_geometric.nn import GINConv, GINEConv -from torch_geometric.testing import is_full_test - - -def test_gin_conv(): - x1 = torch.randn(4, 16) - x2 = torch.randn(2, 16) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj.to_torch_sparse_csc_tensor() - - nn = Seq(Lin(16, 32), ReLU(), Lin(32, 32)) - conv = GINConv(nn, train_eps=True) - assert str(conv) == ( - 'GINConv(nn=Sequential(\n' - ' (0): Linear(in_features=16, out_features=32, bias=True)\n' - ' (1): ReLU()\n' - ' (2): Linear(in_features=32, out_features=32, bias=True)\n' - '))') - out = conv(x1, edge_index) - assert out.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, size=(4, 4)), out, atol=1e-6) - assert torch.allclose(conv(x1, adj.t()), out, atol=1e-6) - assert torch.allclose(conv(x1, adj2.t()), out, atol=1e-6) - - if is_full_test(): - t = '(Tensor, Tensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit(x1, edge_index).tolist() == out.tolist() - assert jit(x1, edge_index, size=(4, 4)).tolist() == out.tolist() - - t = '(Tensor, SparseTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit(x1, adj.t()).tolist() == out.tolist() - - adj = adj.sparse_resize((4, 2)) - adj2 = adj.to_torch_sparse_csc_tensor() - out1 = conv((x1, x2), edge_index) - out2 = conv((x1, None), edge_index, (4, 2)) - assert out1.size() == (2, 32) - assert out2.size() == (2, 32) - assert torch.allclose(conv((x1, x2), edge_index, (4, 2)), out1, atol=1e-6) - assert torch.allclose(conv((x1, x2), adj.t()), out1, atol=1e-6) - assert torch.allclose(conv((x1, None), adj.t()), out2, atol=1e-6) - assert torch.allclose(conv((x1, x2), adj2.t()), out1, atol=1e-6) - assert 
torch.allclose(conv((x1, None), adj2.t()), out2, atol=1e-6) - - if is_full_test(): - t = '(OptPairTensor, Tensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit((x1, x2), edge_index).tolist() == out1.tolist() - assert jit((x1, x2), edge_index, size=(4, 2)).tolist() == out1.tolist() - assert jit((x1, None), edge_index, - size=(4, 2)).tolist() == out2.tolist() - - t = '(OptPairTensor, SparseTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit((x1, x2), adj.t()).tolist() == out1.tolist() - assert jit((x1, None), adj.t()).tolist() == out2.tolist() - - -def test_gine_conv(): - x1 = torch.randn(4, 16) - x2 = torch.randn(2, 16) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - value = torch.randn(row.size(0), 16) - adj = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) - - nn = Seq(Lin(16, 32), ReLU(), Lin(32, 32)) - conv = GINEConv(nn, train_eps=True) - assert str(conv) == ( - 'GINEConv(nn=Sequential(\n' - ' (0): Linear(in_features=16, out_features=32, bias=True)\n' - ' (1): ReLU()\n' - ' (2): Linear(in_features=32, out_features=32, bias=True)\n' - '))') - out = conv(x1, edge_index, value) - assert out.size() == (4, 32) - assert conv(x1, edge_index, value, size=(4, 4)).tolist() == out.tolist() - assert conv(x1, adj.t()).tolist() == out.tolist() - - if is_full_test(): - t = '(Tensor, Tensor, OptTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit(x1, edge_index, value).tolist() == out.tolist() - assert jit(x1, edge_index, value, size=(4, 4)).tolist() == out.tolist() - - t = '(Tensor, SparseTensor, OptTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit(x1, adj.t()).tolist() == out.tolist() - - adj = adj.sparse_resize((4, 2)) - out1 = conv((x1, x2), edge_index, value) - out2 = conv((x1, None), edge_index, value, (4, 2)) - assert out1.size() == (2, 32) - assert out2.size() == (2, 32) - assert conv((x1, x2), edge_index, value, (4, 2)).tolist() == out1.tolist() - assert conv((x1, x2), adj.t()).tolist() == out1.tolist() - assert conv((x1, None), adj.t()).tolist() == out2.tolist() - - if is_full_test(): - t = '(OptPairTensor, Tensor, OptTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit((x1, x2), edge_index, value).tolist() == out1.tolist() - assert jit((x1, x2), edge_index, value, - size=(4, 2)).tolist() == out1.tolist() - assert jit((x1, None), edge_index, value, - size=(4, 2)).tolist() == out2.tolist() - - t = '(OptPairTensor, SparseTensor, OptTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit((x1, x2), adj.t()).tolist() == out1.tolist() - assert jit((x1, None), adj.t()).tolist() == out2.tolist() - - -def test_gine_conv_edge_dim(): - x = torch.randn(4, 16) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - edge_attr = torch.randn(edge_index.size(1), 8) - - nn = Seq(Lin(16, 32), ReLU(), Lin(32, 32)) - conv = GINEConv(nn, train_eps=True, edge_dim=8) - out = conv(x, edge_index, edge_attr) - assert out.size() == (4, 32) - - nn = Lin(16, 32) - conv = GINEConv(nn, train_eps=True, edge_dim=8) - out = conv(x, edge_index, edge_attr) - assert out.size() == (4, 32) - - -def test_static_gin_conv(): - x = torch.randn(3, 4, 16) - edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - - nn = Seq(Lin(16, 32), ReLU(), Lin(32, 32)) - conv = GINConv(nn, train_eps=True) - out = conv(x, edge_index) - assert out.size() == (3, 4, 32) - - -def test_static_gine_conv(): - x = 
torch.randn(3, 4, 16) - edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - edge_attr = torch.randn(edge_index.size(1), 16) - - nn = Seq(Lin(16, 32), ReLU(), Lin(32, 32)) - conv = GINEConv(nn, train_eps=True) - out = conv(x, edge_index, edge_attr) - assert out.size() == (3, 4, 32) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_gmm_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_gmm_conv.py deleted file mode 100644 index 237cd49..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_gmm_conv.py +++ /dev/null @@ -1,88 +0,0 @@ -import pytest -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import GMMConv -from torch_geometric.testing import is_full_test - - -@pytest.mark.parametrize('separate_gaussians', [True, False]) -def test_gmm_conv(separate_gaussians): - x1 = torch.randn(4, 8) - x2 = torch.randn(2, 16) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - value = torch.rand(row.size(0), 3) - adj1 = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_coo_tensor() - - conv = GMMConv(8, 32, dim=3, kernel_size=25, - separate_gaussians=separate_gaussians) - assert str(conv) == 'GMMConv(8, 32, dim=3)' - out = conv(x1, edge_index, value) - assert out.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, value, size=(4, 4)), out) - assert torch.allclose(conv(x1, adj1.t()), out) - # t() expects a tensor with <= 2 sparse and 0 dense dimensions - assert torch.allclose(conv(x1, adj2.transpose(0, 1).coalesce()), out) - - if is_full_test(): - t = '(Tensor, Tensor, OptTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, edge_index, value), out) - assert torch.allclose(jit(x1, edge_index, value, size=(4, 4)), out) - - t = '(Tensor, SparseTensor, OptTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, adj1.t()), out) - - adj1 = adj1.sparse_resize((4, 2)) - adj2 = adj1.to_torch_sparse_coo_tensor() - conv = GMMConv((8, 16), 32, dim=3, kernel_size=5, - separate_gaussians=separate_gaussians) - assert str(conv) == 'GMMConv((8, 16), 32, dim=3)' - out1 = conv((x1, x2), edge_index, value) - out2 = conv((x1, None), edge_index, value, (4, 2)) - assert out1.size() == (2, 32) - assert out2.size() == (2, 32) - assert torch.allclose(conv((x1, x2), edge_index, value, (4, 2)), out1) - assert torch.allclose(conv((x1, x2), adj1.t()), out1) - assert torch.allclose(conv((x1, x2), - adj2.transpose(0, 1).coalesce()), out1) - assert torch.allclose(conv((x1, None), adj1.t()), out2) - assert torch.allclose(conv((x1, None), - adj2.transpose(0, 1).coalesce()), out2) - - if is_full_test(): - t = '(OptPairTensor, Tensor, OptTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), edge_index, value), out1) - assert torch.allclose(jit((x1, x2), edge_index, value, size=(4, 2)), - out1) - assert torch.allclose(jit((x1, None), edge_index, value, size=(4, 2)), - out2) - - t = '(OptPairTensor, SparseTensor, OptTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), adj1.t()), out1) - assert torch.allclose(jit((x1, None), adj1.t()), out2) - - -@pytest.mark.parametrize('separate_gaussians', [True, False]) -def test_lazy_gmm_conv(separate_gaussians): - x1 = torch.randn(4, 8) - x2 = torch.randn(2, 16) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - value = torch.rand(edge_index.size(1), 3) - - conv 
= GMMConv(-1, 32, dim=3, kernel_size=25, - separate_gaussians=separate_gaussians) - assert str(conv) == 'GMMConv(-1, 32, dim=3)' - out = conv(x1, edge_index, value) - assert out.size() == (4, 32) - - conv = GMMConv((-1, -1), 32, dim=3, kernel_size=25, - separate_gaussians=separate_gaussians) - assert str(conv) == 'GMMConv((-1, -1), 32, dim=3)' - out = conv((x1, x2), edge_index, value) - assert out.size() == (2, 32) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_gps_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_gps_conv.py deleted file mode 100644 index 21032cd..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_gps_conv.py +++ /dev/null @@ -1,30 +0,0 @@ -import pytest -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import GPSConv, SAGEConv - - -@pytest.mark.parametrize('norm', [None, 'batch_norm', 'layer_norm']) -def test_gps_conv(norm): - x = torch.randn(4, 16) - edge_index = torch.tensor([[0, 1, 2, 3], [1, 0, 3, 2]]) - row, col = edge_index - adj1 = SparseTensor(row=col, col=row, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() - batch = torch.tensor([0, 0, 1, 1]) - - conv = GPSConv(16, conv=SAGEConv(16, 16), heads=4, norm=norm) - conv.reset_parameters() - assert str(conv) == ('GPSConv(16, conv=SAGEConv(16, 16, aggr=mean), ' - 'heads=4)') - - out = conv(x, edge_index) - assert out.size() == (4, 16) - assert torch.allclose(conv(x, adj1.t()), out, atol=1e-6) - assert torch.allclose(conv(x, adj2.t()), out, atol=1e-6) - - out = conv(x, edge_index, batch) - assert out.size() == (4, 16) - assert torch.allclose(conv(x, adj1.t(), batch), out, atol=1e-6) - assert torch.allclose(conv(x, adj2.t(), batch), out, atol=1e-6) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_graph_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_graph_conv.py deleted file mode 100644 index c80bb6d..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_graph_conv.py +++ /dev/null @@ -1,86 +0,0 @@ -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import GraphConv -from torch_geometric.testing import is_full_test - - -def test_graph_conv(): - x1 = torch.randn(4, 8) - x2 = torch.randn(2, 16) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - value = torch.randn(edge_index.size(1)) - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) - adj3 = adj1.to_torch_sparse_csc_tensor() - adj4 = adj2.to_torch_sparse_csc_tensor() - - conv = GraphConv(8, 32) - assert str(conv) == 'GraphConv(8, 32)' - out11 = conv(x1, edge_index) - assert out11.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, size=(4, 4)), out11, atol=1e-6) - assert torch.allclose(conv(x1, adj1.t()), out11, atol=1e-6) - assert torch.allclose(conv(x1, adj3.t()), out11, atol=1e-6) - - out12 = conv(x1, edge_index, value) - assert out12.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, value, size=(4, 4)), out12, - atol=1e-6) - assert torch.allclose(conv(x1, adj2.t()), out12, atol=1e-6) - assert torch.allclose(conv(x1, adj4.t()), out12, atol=1e-6) - - if is_full_test(): - t = '(Tensor, Tensor, OptTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, edge_index), out11) - assert torch.allclose(jit(x1, edge_index, size=(4, 4)), out11) - assert torch.allclose(jit(x1, edge_index, value), out12) - assert torch.allclose(jit(x1, edge_index, value, size=(4, 4)), out12) - - t = '(Tensor, 
SparseTensor, OptTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, adj1.t()), out11) - assert torch.allclose(jit(x1, adj2.t()), out12) - - adj1 = adj1.sparse_resize((4, 2)) - adj2 = adj2.sparse_resize((4, 2)) - adj3 = adj1.to_torch_sparse_csc_tensor() - adj4 = adj2.to_torch_sparse_csc_tensor() - conv = GraphConv((8, 16), 32) - assert str(conv) == 'GraphConv((8, 16), 32)' - out21 = conv((x1, x2), edge_index) - out22 = conv((x1, x2), edge_index, value) - out23 = conv((x1, None), edge_index, size=(4, 2)) - out24 = conv((x1, None), edge_index, value, size=(4, 2)) - assert out21.size() == (2, 32) - assert out22.size() == (2, 32) - assert out23.size() == (2, 32) - assert out24.size() == (2, 32) - assert torch.allclose(conv((x1, x2), edge_index, size=(4, 2)), out21, - atol=1e-6) - assert torch.allclose(conv((x1, x2), edge_index, value, (4, 2)), out22, - atol=1e-6) - assert torch.allclose(conv((x1, x2), adj1.t()), out21, atol=1e-6) - assert torch.allclose(conv((x1, x2), adj2.t()), out22, atol=1e-6) - assert torch.allclose(conv((x1, x2), adj3.t()), out21, atol=1e-6) - assert torch.allclose(conv((x1, x2), adj4.t()), out22, atol=1e-6) - - if is_full_test(): - t = '(OptPairTensor, Tensor, OptTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), edge_index), out21) - assert torch.allclose(jit((x1, x2), edge_index, size=(4, 2)), out21) - assert torch.allclose(jit((x1, x2), edge_index, value), out22) - assert torch.allclose(jit((x1, x2), edge_index, value, (4, 2)), out22) - assert torch.allclose(jit((x1, None), edge_index, size=(4, 2)), out23) - assert torch.allclose(jit((x1, None), edge_index, value, (4, 2)), - out24) - - t = '(OptPairTensor, SparseTensor, OptTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), adj1.t()), out21, atol=1e-6) - assert torch.allclose(jit((x1, x2), adj2.t()), out22, atol=1e-6) - assert torch.allclose(jit((x1, None), adj1.t()), out23, atol=1e-6) - assert torch.allclose(jit((x1, None), adj2.t()), out24, atol=1e-6) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_han_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_han_conv.py deleted file mode 100644 index 35bfedc..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_han_conv.py +++ /dev/null @@ -1,127 +0,0 @@ -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import HANConv -from torch_geometric.utils import coalesce - - -def test_han_conv(): - x_dict = { - 'author': torch.randn(6, 16), - 'paper': torch.randn(5, 12), - 'term': torch.randn(4, 3) - } - edge_index1 = coalesce(torch.randint(0, 6, (2, 7))) - edge_index2 = coalesce(torch.randint(0, 5, (2, 4))) - edge_index3 = coalesce(torch.randint(0, 3, (2, 5))) - edge_index_dict = { - ('author', 'metapath0', 'author'): edge_index1, - ('paper', 'metapath1', 'paper'): edge_index2, - ('paper', 'metapath2', 'paper'): edge_index3, - } - - adj_t_dict1 = {} - adj_t_dict2 = {} - for edge_type, edge_index in edge_index_dict.items(): - src_type, _, dst_type = edge_type - adj_t_dict1[edge_type] = SparseTensor( - row=edge_index[0], col=edge_index[1], - sparse_sizes=(x_dict[src_type].size(0), - x_dict[dst_type].size(0))).t() - adj_t_dict2[edge_type] = adj_t_dict1[ - edge_type].to_torch_sparse_csr_tensor() - - metadata = (list(x_dict.keys()), list(edge_index_dict.keys())) - in_channels = {'author': 16, 'paper': 12, 'term': 3} - - conv = HANConv(in_channels, 16, metadata, heads=2) - assert str(conv) == 
'HANConv(16, heads=2)' - out_dict1 = conv(x_dict, edge_index_dict) - assert len(out_dict1) == 3 - assert out_dict1['author'].size() == (6, 16) - assert out_dict1['paper'].size() == (5, 16) - assert out_dict1['term'] is None - del out_dict1['term'] - del x_dict['term'] - - out_dict2 = conv(x_dict, adj_t_dict1) - assert len(out_dict1) == len(out_dict2) - for key in out_dict1.keys(): - assert torch.allclose(out_dict1[key], out_dict2[key], atol=1e-6) - - out_dict3 = conv(x_dict, adj_t_dict2) - assert len(out_dict1) == len(out_dict3) - for key in out_dict3.keys(): - assert torch.allclose(out_dict1[key], out_dict3[key], atol=1e-6) - - # Test non-zero dropout: - conv = HANConv(in_channels, 16, metadata, heads=2, dropout=0.1) - assert str(conv) == 'HANConv(16, heads=2)' - out_dict1 = conv(x_dict, edge_index_dict) - assert len(out_dict1) == 2 - assert out_dict1['author'].size() == (6, 16) - assert out_dict1['paper'].size() == (5, 16) - - -def test_han_conv_lazy(): - x_dict = { - 'author': torch.randn(6, 16), - 'paper': torch.randn(5, 12), - } - edge_index1 = coalesce(torch.randint(0, 6, (2, 8))) - edge_index2 = coalesce(torch.randint(0, 5, (2, 6))) - edge_index_dict = { - ('author', 'to', 'author'): edge_index1, - ('paper', 'to', 'paper'): edge_index2, - } - - adj_t_dict1 = {} - adj_t_dict2 = {} - for edge_type, edge_index in edge_index_dict.items(): - src_type, _, dst_type = edge_type - adj_t_dict1[edge_type] = SparseTensor( - row=edge_index[0], col=edge_index[1], - sparse_sizes=(x_dict[src_type].size(0), - x_dict[dst_type].size(0))).t() - adj_t_dict2[edge_type] = adj_t_dict1[ - edge_type].to_torch_sparse_csr_tensor() - - metadata = (list(x_dict.keys()), list(edge_index_dict.keys())) - conv = HANConv(-1, 16, metadata, heads=2) - assert str(conv) == 'HANConv(16, heads=2)' - out_dict1 = conv(x_dict, edge_index_dict) - assert len(out_dict1) == 2 - assert out_dict1['author'].size() == (6, 16) - assert out_dict1['paper'].size() == (5, 16) - - out_dict2 = conv(x_dict, adj_t_dict1) - assert len(out_dict1) == len(out_dict2) - for key in out_dict1.keys(): - assert torch.allclose(out_dict1[key], out_dict2[key], atol=1e-6) - - out_dict3 = conv(x_dict, adj_t_dict2) - assert len(out_dict1) == len(out_dict3) - for key in out_dict1.keys(): - assert torch.allclose(out_dict1[key], out_dict3[key], atol=1e-6) - - -def test_han_conv_empty_tensor(): - x_dict = { - 'author': torch.randn(6, 16), - 'paper': torch.empty(0, 12), - } - edge_index_dict = { - ('paper', 'to', 'author'): torch.empty((2, 0), dtype=torch.long), - ('author', 'to', 'paper'): torch.empty((2, 0), dtype=torch.long), - ('paper', 'to', 'paper'): torch.empty((2, 0), dtype=torch.long), - } - - metadata = (list(x_dict.keys()), list(edge_index_dict.keys())) - in_channels = {'author': 16, 'paper': 12} - conv = HANConv(in_channels, 16, metadata, heads=2) - - out_dict = conv(x_dict, edge_index_dict) - assert len(out_dict) == 2 - assert out_dict['author'].size() == (6, 16) - assert torch.all(out_dict['author'] == 0) - assert out_dict['paper'].size() == (0, 16) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_heat_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_heat_conv.py deleted file mode 100644 index feecf59..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_heat_conv.py +++ /dev/null @@ -1,42 +0,0 @@ -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import HEATConv -from torch_geometric.testing import is_full_test - - -def test_heat_conv(): - x = torch.randn(4, 8) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 
0, 1, 1]]) - row, col = edge_index - edge_attr = torch.randn((4, 2)) - adj = SparseTensor(row=row, col=col, value=edge_attr, sparse_sizes=(4, 4)) - node_type = torch.tensor([0, 0, 1, 2]) - edge_type = torch.tensor([0, 2, 1, 2]) - - conv = HEATConv(in_channels=8, out_channels=16, num_node_types=3, - num_edge_types=3, edge_type_emb_dim=5, edge_dim=2, - edge_attr_emb_dim=6, heads=2, concat=True) - assert str(conv) == 'HEATConv(8, 16, heads=2)' - out = conv(x, edge_index, node_type, edge_type, edge_attr) - assert out.size() == (4, 32) - assert torch.allclose(conv(x, adj.t(), node_type, edge_type), out) - - if is_full_test(): - t = '(Tensor, Tensor, Tensor, Tensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose( - jit(x, edge_index, node_type, edge_type, edge_attr), out) - - conv = HEATConv(in_channels=8, out_channels=16, num_node_types=3, - num_edge_types=3, edge_type_emb_dim=5, edge_dim=2, - edge_attr_emb_dim=6, heads=2, concat=False) - assert str(conv) == 'HEATConv(8, 16, heads=2)' - out = conv(x, edge_index, node_type, edge_type, edge_attr) - assert out.size() == (4, 16) - assert torch.allclose(conv(x, adj.t(), node_type, edge_type), out) - - if is_full_test(): - t = '(Tensor, SparseTensor, Tensor, Tensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, adj.t(), node_type, edge_type), out) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_hetero_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_hetero_conv.py deleted file mode 100644 index ad38a76..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_hetero_conv.py +++ /dev/null @@ -1,130 +0,0 @@ -import pytest -import torch - -from torch_geometric.data import HeteroData -from torch_geometric.nn import ( - GATConv, - GCNConv, - HeteroConv, - Linear, - MessagePassing, - SAGEConv, -) -from torch_geometric.testing import get_random_edge_index - - -@pytest.mark.parametrize('aggr', ['sum', 'mean', 'min', 'max', 'cat', None]) -def test_hetero_conv(aggr): - data = HeteroData() - data['paper'].x = torch.randn(50, 32) - data['author'].x = torch.randn(30, 64) - data['paper', 'paper'].edge_index = get_random_edge_index(50, 50, 200) - data['paper', 'author'].edge_index = get_random_edge_index(50, 30, 100) - data['paper', 'author'].edge_attr = torch.randn(100, 3) - data['author', 'paper'].edge_index = get_random_edge_index(30, 50, 100) - data['paper', 'paper'].edge_weight = torch.rand(200) - - # Unspecified edge types should be ignored: - data['author', 'author'].edge_index = get_random_edge_index(30, 30, 100) - - conv = HeteroConv( - { - ('paper', 'to', 'paper'): - GCNConv(-1, 64), - ('author', 'to', 'paper'): - SAGEConv((-1, -1), 64), - ('paper', 'to', 'author'): - GATConv((-1, -1), 64, edge_dim=3, add_self_loops=False), - }, aggr=aggr) - - assert len(list(conv.parameters())) > 0 - assert str(conv) == 'HeteroConv(num_relations=3)' - - out = conv(data.x_dict, data.edge_index_dict, data.edge_attr_dict, - edge_weight_dict=data.edge_weight_dict) - - assert len(out) == 2 - if aggr == 'cat': - assert out['paper'].size() == (50, 128) - assert out['author'].size() == (30, 64) - elif aggr is not None: - assert out['paper'].size() == (50, 64) - assert out['author'].size() == (30, 64) - else: - assert out['paper'].size() == (50, 2, 64) - assert out['author'].size() == (30, 1, 64) - - -class CustomConv(MessagePassing): - def __init__(self, out_channels): - super().__init__(aggr='add') - self.lin = Linear(-1, out_channels) - - def forward(self, x, edge_index, y, z): 
- return self.propagate(edge_index, x=x, y=y, z=z) - - def message(self, x_j, y_j, z_j): - return self.lin(torch.cat([x_j, y_j, z_j], dim=-1)) - - -def test_hetero_conv_with_custom_conv(): - data = HeteroData() - data['paper'].x = torch.randn(50, 32) - data['paper'].y = torch.randn(50, 3) - data['paper'].z = torch.randn(50, 3) - data['author'].x = torch.randn(30, 64) - data['author'].y = torch.randn(30, 3) - data['author'].z = torch.randn(30, 3) - data['paper', 'paper'].edge_index = get_random_edge_index(50, 50, 200) - data['paper', 'author'].edge_index = get_random_edge_index(50, 30, 100) - data['author', 'paper'].edge_index = get_random_edge_index(30, 50, 100) - - conv = HeteroConv({key: CustomConv(64) for key in data.edge_types}) - # Test node `args_dict` and `kwargs_dict` with `y_dict` and `z_dict`: - out = conv(data.x_dict, data.edge_index_dict, data.y_dict, - z_dict=data.z_dict) - assert len(out) == 2 - assert out['paper'].size() == (50, 64) - assert out['author'].size() == (30, 64) - - -class MessagePassingLoops(MessagePassing): - def __init__(self): - super().__init__() - self.add_self_loops = True - - -def test_hetero_conv_self_loop_error(): - HeteroConv({('a', 'to', 'a'): MessagePassingLoops()}) - with pytest.raises(ValueError, match="incorrect message passing"): - HeteroConv({('a', 'to', 'b'): MessagePassingLoops()}) - - -def test_hetero_conv_with_dot_syntax_node_types(): - data = HeteroData() - data['src.paper'].x = torch.randn(50, 32) - data['author'].x = torch.randn(30, 64) - edge_index = get_random_edge_index(50, 50, 200) - data['src.paper', 'src.paper'].edge_index = edge_index - data['src.paper', 'author'].edge_index = get_random_edge_index(50, 30, 100) - data['author', 'src.paper'].edge_index = get_random_edge_index(30, 50, 100) - data['src.paper', 'src.paper'].edge_weight = torch.rand(200) - - conv = HeteroConv({ - ('src.paper', 'to', 'src.paper'): - GCNConv(-1, 64), - ('author', 'to', 'src.paper'): - SAGEConv((-1, -1), 64), - ('src.paper', 'to', 'author'): - GATConv((-1, -1), 64, add_self_loops=False), - }) - - assert len(list(conv.parameters())) > 0 - assert str(conv) == 'HeteroConv(num_relations=3)' - - out = conv(data.x_dict, data.edge_index_dict, - edge_weight_dict=data.edge_weight_dict) - - assert len(out) == 2 - assert out['src.paper'].size() == (50, 64) - assert out['author'].size() == (30, 64) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_hgt_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_hgt_conv.py deleted file mode 100644 index bf6a522..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_hgt_conv.py +++ /dev/null @@ -1,211 +0,0 @@ -import torch -from torch_sparse import SparseTensor - -from torch_geometric.data import HeteroData -from torch_geometric.nn import HGTConv -from torch_geometric.profile import benchmark -from torch_geometric.utils import coalesce - - -def test_hgt_conv_same_dimensions(): - x_dict = { - 'author': torch.randn(4, 16), - 'paper': torch.randn(6, 16), - } - - row = torch.randint(0, 4, (20, ), dtype=torch.long) - col = torch.randint(0, 6, (20, ), dtype=torch.long) - edge_index = coalesce(torch.stack([row, col], dim=0)) - - edge_index_dict = { - ('author', 'writes', 'paper'): edge_index, - ('paper', 'written_by', 'author'): edge_index.flip([0]), - } - - adj_t_dict1 = {} - adj_t_dict2 = {} - for edge_type, edge_index in edge_index_dict.items(): - src_type, _, dst_type = edge_type - adj_t_dict1[edge_type] = SparseTensor( - row=edge_index[0], col=edge_index[1], - sparse_sizes=(x_dict[src_type].size(0), - 
x_dict[dst_type].size(0))).t() -        adj_t_dict2[edge_type] = adj_t_dict1[ -            edge_type].to_torch_sparse_csr_tensor() - -    metadata = (list(x_dict.keys()), list(edge_index_dict.keys())) - -    conv = HGTConv(16, 16, metadata, heads=2) -    assert str(conv) == 'HGTConv(-1, 16, heads=2)' -    out_dict1 = conv(x_dict, edge_index_dict) -    assert len(out_dict1) == 2 -    assert out_dict1['author'].size() == (4, 16) -    assert out_dict1['paper'].size() == (6, 16) - -    out_dict2 = conv(x_dict, adj_t_dict1) -    assert len(out_dict1) == len(out_dict2) -    for key in out_dict1.keys(): -        assert torch.allclose(out_dict1[key], out_dict2[key], atol=1e-6) - -    out_dict3 = conv(x_dict, adj_t_dict2) -    assert len(out_dict1) == len(out_dict3) -    for key in out_dict1.keys(): -        assert torch.allclose(out_dict1[key], out_dict3[key], atol=1e-6) - -    # TODO: Test JIT functionality. We need to wait on this one until PyTorch -    # allows indexing `ParameterDict` mappings :( - - -def test_hgt_conv_different_dimensions(): -    x_dict = { -        'author': torch.randn(4, 16), -        'paper': torch.randn(6, 32), -    } - -    row = torch.randint(0, 4, (20, ), dtype=torch.long) -    col = torch.randint(0, 6, (20, ), dtype=torch.long) -    edge_index = coalesce(torch.stack([row, col], dim=0)) - -    edge_index_dict = { -        ('author', 'writes', 'paper'): edge_index, -        ('paper', 'written_by', 'author'): edge_index.flip([0]), -    } - -    adj_t_dict1 = {} -    adj_t_dict2 = {} -    for edge_type, edge_index in edge_index_dict.items(): -        src_type, _, dst_type = edge_type -        adj_t_dict1[edge_type] = SparseTensor( -            row=edge_index[0], col=edge_index[1], -            sparse_sizes=(x_dict[src_type].size(0), -                          x_dict[dst_type].size(0))).t() -        adj_t_dict2[edge_type] = adj_t_dict1[ -            edge_type].to_torch_sparse_csr_tensor() - -    metadata = (list(x_dict.keys()), list(edge_index_dict.keys())) - -    conv = HGTConv(in_channels={ -        'author': 16, -        'paper': 32 -    }, out_channels=32, metadata=metadata, heads=2) -    assert str(conv) == 'HGTConv(-1, 32, heads=2)' -    out_dict1 = conv(x_dict, edge_index_dict) -    assert len(out_dict1) == 2 -    assert out_dict1['author'].size() == (4, 32) -    assert out_dict1['paper'].size() == (6, 32) - -    out_dict2 = conv(x_dict, adj_t_dict1) -    assert len(out_dict1) == len(out_dict2) -    for key in out_dict1.keys(): -        assert torch.allclose(out_dict1[key], out_dict2[key], atol=1e-6) - -    out_dict3 = conv(x_dict, adj_t_dict2) -    assert len(out_dict1) == len(out_dict3) -    for key in out_dict1.keys(): -        assert torch.allclose(out_dict1[key], out_dict3[key], atol=1e-6) - - -def test_hgt_conv_lazy(): -    x_dict = { -        'author': torch.randn(4, 16), -        'paper': torch.randn(6, 32), -    } - -    row = torch.randint(0, 4, (20, ), dtype=torch.long) -    col = torch.randint(0, 6, (20, ), dtype=torch.long) -    edge_index = coalesce(torch.stack([row, col], dim=0)) - -    edge_index_dict = { -        ('author', 'writes', 'paper'): edge_index, -        ('paper', 'written_by', 'author'): edge_index.flip([0]), -    } - -    adj_t_dict1 = {} -    adj_t_dict2 = {} -    for edge_type, edge_index in edge_index_dict.items(): -        src_type, _, dst_type = edge_type -        adj_t_dict1[edge_type] = SparseTensor( -            row=edge_index[0], col=edge_index[1], -            sparse_sizes=(x_dict[src_type].size(0), -                          x_dict[dst_type].size(0))).t() -        adj_t_dict2[edge_type] = adj_t_dict1[ -            edge_type].to_torch_sparse_csr_tensor() - -    metadata = (list(x_dict.keys()), list(edge_index_dict.keys())) - -    conv = HGTConv(-1, 32, metadata, heads=2) -    assert str(conv) == 'HGTConv(-1, 32, heads=2)' -    out_dict1 = conv(x_dict, edge_index_dict) -    assert len(out_dict1) == 2 -    assert out_dict1['author'].size() == (4, 32) -
assert out_dict1['paper'].size() == (6, 32) - - out_dict2 = conv(x_dict, adj_t_dict1) - assert len(out_dict1) == len(out_dict2) - for key in out_dict1.keys(): - assert torch.allclose(out_dict1[key], out_dict2[key], atol=1e-6) - - out_dict3 = conv(x_dict, adj_t_dict2) - assert len(out_dict1) == len(out_dict3) - for key in out_dict1.keys(): - assert torch.allclose(out_dict1[key], out_dict3[key], atol=1e-6) - - -def test_hgt_conv_out_of_place(): - data = HeteroData() - data['author'].x = torch.randn(4, 16) - data['paper'].x = torch.randn(6, 32) - - index1 = torch.randint(0, 4, (20, ), dtype=torch.long) - index2 = torch.randint(0, 6, (20, ), dtype=torch.long) - - data['author', 'paper'].edge_index = torch.stack([index1, index2], dim=0) - data['paper', 'author'].edge_index = torch.stack([index2, index1], dim=0) - - conv = HGTConv(-1, 64, data.metadata(), heads=1) - - x_dict, edge_index_dict = data.x_dict, data.edge_index_dict - assert x_dict['author'].size() == (4, 16) - assert x_dict['paper'].size() == (6, 32) - - _ = conv(x_dict, edge_index_dict) - - assert x_dict['author'].size() == (4, 16) - assert x_dict['paper'].size() == (6, 32) - - -if __name__ == '__main__': - import argparse - - parser = argparse.ArgumentParser() - parser.add_argument('--device', type=str, default='cuda') - args = parser.parse_args() - - num_nodes, num_edges = 30_000, 300_000 - x_dict = { - 'paper': torch.randn(num_nodes, 64, device=args.device), - 'author': torch.randn(num_nodes, 64, device=args.device), - } - edge_index_dict = { - ('paper', 'to', 'paper'): - torch.randint(num_nodes, (2, num_edges), device=args.device), - ('author', 'to', 'paper'): - torch.randint(num_nodes, (2, num_edges), device=args.device), - ('paper', 'to', 'author'): - torch.randint(num_nodes, (2, num_edges), device=args.device), - } - - conv = HGTConv( - in_channels=64, - out_channels=64, - metadata=(list(x_dict.keys()), list(edge_index_dict.keys())), - heads=4, - ).to(args.device) - - benchmark( - funcs=[conv], - args=(x_dict, edge_index_dict), - num_steps=10 if args.device == 'cpu' else 100, - num_warmups=5 if args.device == 'cpu' else 50, - backward=False, - ) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_le_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_le_conv.py deleted file mode 100644 index 2e7d858..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_le_conv.py +++ /dev/null @@ -1,30 +0,0 @@ -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import LEConv -from torch_geometric.testing import is_full_test - - -def test_le_conv(): - in_channels, out_channels = (16, 32) - edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - num_nodes = edge_index.max().item() + 1 - x = torch.randn((num_nodes, in_channels)) - adj1 = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() - - conv = LEConv(in_channels, out_channels) - assert str(conv) == 'LEConv(16, 32)' - out = conv(x, edge_index) - assert out.size() == (num_nodes, out_channels) - assert torch.allclose(conv(x, adj1.t()), out) - assert torch.allclose(conv(x, adj2.t()), out) - - if is_full_test(): - t = '(Tensor, Tensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - torch.allclose(jit(x, edge_index), out) - - t = '(Tensor, SparseTensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, adj1.t()), out) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_lg_conv.py 
b/pytorch_geometric-2.3.1/test/nn/conv/test_lg_conv.py deleted file mode 100644 index 2ad940f..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_lg_conv.py +++ /dev/null @@ -1,38 +0,0 @@ -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import LGConv -from torch_geometric.testing import is_full_test - - -def test_lg_conv(): - x = torch.randn(4, 8) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - value = torch.rand(row.size(0)) - adj2 = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) - adj1 = adj2.set_value(None) - adj3 = adj1.to_torch_sparse_csc_tensor() - adj4 = adj2.to_torch_sparse_csc_tensor() - - conv = LGConv() - assert str(conv) == 'LGConv()' - out1 = conv(x, edge_index) - assert out1.size() == (4, 8) - assert torch.allclose(conv(x, adj1.t()), out1) - assert torch.allclose(conv(x, adj3.t()), out1) - out2 = conv(x, edge_index, value) - assert out2.size() == (4, 8) - assert torch.allclose(conv(x, adj2.t()), out2) - assert torch.allclose(conv(x, adj4.t()), out2) - - if is_full_test(): - t = '(Tensor, Tensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, edge_index), out1) - assert torch.allclose(jit(x, edge_index, value), out2) - - t = '(Tensor, SparseTensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, adj1.t()), out1) - assert torch.allclose(jit(x, adj2.t()), out2) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_message_passing.py b/pytorch_geometric-2.3.1/test/nn/conv/test_message_passing.py deleted file mode 100644 index 23fe855..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_message_passing.py +++ /dev/null @@ -1,578 +0,0 @@ -import copy -from typing import Tuple, Union - -import pytest -import torch -from torch import Tensor -from torch.nn import Linear -from torch_sparse import SparseTensor - -from torch_geometric.nn import MessagePassing, aggr -from torch_geometric.typing import Adj, OptPairTensor, OptTensor, Size -from torch_geometric.utils import scatter, spmm - - -class MyConv(MessagePassing): - def __init__(self, in_channels: Union[int, Tuple[int, int]], - out_channels: int, aggr: str = 'add'): - super().__init__(aggr=aggr) - - if isinstance(in_channels, int): - in_channels = (in_channels, in_channels) - - self.lin_l = Linear(in_channels[0], out_channels) - self.lin_r = Linear(in_channels[1], out_channels) - - def forward(self, x: Union[Tensor, OptPairTensor], edge_index: Adj, - edge_weight: OptTensor = None, size: Size = None) -> Tensor: - if isinstance(x, Tensor): - x: OptPairTensor = (x, x) - - # propagate_type: (x: OptPairTensor, edge_weight: OptTensor) - out = self.propagate(edge_index, x=x, edge_weight=edge_weight, - size=size) - out = self.lin_l(out) - - x_r = x[1] - if x_r is not None: - out += self.lin_r(x_r) - - return out - - def message(self, x_j: Tensor, edge_weight: Tensor) -> Tensor: - return edge_weight.view(-1, 1) * x_j - - def message_and_aggregate(self, adj_t: SparseTensor, - x: OptPairTensor) -> Tensor: - return spmm(adj_t, x[0], reduce=self.aggr) - - -def test_my_conv_basic(): - x1 = torch.randn(4, 8) - x2 = torch.randn(2, 16) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - value = torch.randn(row.size(0)) - adj = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) - torch_adj = adj.to_torch_sparse_csc_tensor() - - conv = MyConv(8, 32) - out = conv(x1, edge_index, value) - assert out.size() == (4, 32) - 
assert torch.allclose(conv(x1, edge_index, value, (4, 4)), out, atol=1e-6) - assert torch.allclose(conv(x1, adj.t()), out, atol=1e-6) - assert torch.allclose(conv(x1, torch_adj.t()), out, atol=1e-6) - conv.fuse = False - assert torch.allclose(conv(x1, adj.t()), out) - assert torch.allclose(conv(x1, torch_adj.t()), out, atol=1e-6) - conv.fuse = True - - adj = adj.sparse_resize((4, 2)) - torch_adj = adj.to_torch_sparse_csc_tensor() - - conv = MyConv((8, 16), 32) - out1 = conv((x1, x2), edge_index, value) - out2 = conv((x1, None), edge_index, value, (4, 2)) - assert out1.size() == (2, 32) - assert out2.size() == (2, 32) - assert torch.allclose(conv((x1, x2), edge_index, value, (4, 2)), out1) - assert torch.allclose(conv((x1, x2), adj.t()), out1, atol=1e-6) - assert torch.allclose(conv((x1, x2), torch_adj.t()), out1, atol=1e-6) - assert torch.allclose(conv((x1, None), adj.t()), out2, atol=1e-6) - assert torch.allclose(conv((x1, None), torch_adj.t()), out2, atol=1e-6) - conv.fuse = False - assert torch.allclose(conv((x1, x2), adj.t()), out1, atol=1e-6) - assert torch.allclose(conv((x1, x2), torch_adj.t()), out1, atol=1e-6) - assert torch.allclose(conv((x1, None), adj.t()), out2, atol=1e-6) - assert torch.allclose(conv((x1, None), torch_adj.t()), out2, atol=1e-6) - - # Test gradient computation for `torch.sparse` tensors: - conv.fuse = True - torch_adj_t = torch_adj.t().requires_grad_() - out = conv((x1, x2), torch_adj_t) - out.sum().backward() - assert torch_adj_t.grad is not None - - -def test_my_conv_out_of_bounds(): - x = torch.randn(3, 8) - value = torch.randn(4) - - conv = MyConv(8, 32) - - with pytest.raises(IndexError, match="valid indices"): - edge_index = torch.tensor([[-1, 1, 2, 2], [0, 0, 1, 1]]) - conv(x, edge_index, value) - - with pytest.raises(IndexError, match="valid indices"): - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - conv(x, edge_index, value) - - -def test_my_conv_jittable(): - x1 = torch.randn(4, 8) - x2 = torch.randn(2, 16) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - value = torch.randn(row.size(0)) - adj = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) - - conv = MyConv(8, 32) - out = conv(x1, edge_index, value) - - t = '(Tensor, Tensor, OptTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, edge_index, value), out, atol=1e-6) - assert torch.allclose(jit(x1, edge_index, value, (4, 4)), out, atol=1e-6) - - t = '(Tensor, SparseTensor, OptTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, adj.t()), out, atol=1e-6) - jit.fuse = False - assert torch.allclose(jit(x1, adj.t()), out, atol=1e-6) - jit.fuse = True - - adj = adj.sparse_resize((4, 2)) - conv = MyConv((8, 16), 32) - out1 = conv((x1, x2), edge_index, value) - out2 = conv((x1, None), edge_index, value, (4, 2)) - - t = '(OptPairTensor, Tensor, OptTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), edge_index, value), out1) - assert torch.allclose(jit((x1, x2), edge_index, value, (4, 2)), out1) - assert torch.allclose(jit((x1, None), edge_index, value, (4, 2)), out2) - - t = '(OptPairTensor, SparseTensor, OptTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), adj.t()), out1, atol=1e-6) - assert torch.allclose(jit((x1, None), adj.t()), out2, atol=1e-6) - jit.fuse = False - assert torch.allclose(jit((x1, x2), adj.t()), out1, atol=1e-6) - assert 
torch.allclose(jit((x1, None), adj.t()), out2, atol=1e-6) - jit.fuse = True - - -@pytest.mark.parametrize('aggr', ['add', 'sum', 'mean', 'min', 'max', 'mul']) -def test_my_conv_aggr(aggr): - x = torch.randn(4, 8) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - edge_weight = torch.randn(edge_index.size(1)) - - conv = MyConv(8, 32, aggr=aggr) - out = conv(x, edge_index, edge_weight) - assert out.size() == (4, 32) - - -def test_my_static_graph_conv(): - x1 = torch.randn(3, 4, 8) - x2 = torch.randn(3, 2, 16) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - value = torch.randn(row.size(0)) - adj = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) - - conv = MyConv(8, 32) - out = conv(x1, edge_index, value) - assert out.size() == (3, 4, 32) - assert torch.allclose(conv(x1, edge_index, value, (4, 4)), out) - assert torch.allclose(conv(x1, adj.t()), out) - - adj = adj.sparse_resize((4, 2)) - conv = MyConv((8, 16), 32) - out1 = conv((x1, x2), edge_index, value) - assert out1.size() == (3, 2, 32) - assert torch.allclose(conv((x1, x2), edge_index, value, (4, 2)), out1) - assert torch.allclose(conv((x1, x2), adj.t()), out1) - out2 = conv((x1, None), edge_index, value, (4, 2)) - assert out2.size() == (3, 2, 32) - assert torch.allclose(conv((x1, None), adj.t()), out2) - - -class MyMultipleAggrConv(MessagePassing): - def __init__(self, **kwargs): - super().__init__(aggr=['add', 'mean', 'max'], **kwargs) - - def forward(self, x: Tensor, edge_index: Adj) -> Tensor: - # propagate_type: (x: Tensor) - return self.propagate(edge_index, x=x, size=None) - - -@pytest.mark.parametrize('multi_aggr_tuple', [ - (dict(mode='cat'), 3), - (dict(mode='proj', mode_kwargs=dict(in_channels=16, out_channels=16)), 1) -]) -def test_my_multiple_aggr_conv(multi_aggr_tuple): - # The 'cat' combine mode will expand the output dimensions by - # the number of aggregators which is 3 here, while the 'proj' - # mode keeps output dimensions unchanged. 
- aggr_kwargs, expand = multi_aggr_tuple - x = torch.randn(4, 16) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - torch_adj = adj.to_torch_sparse_csc_tensor() - - conv = MyMultipleAggrConv(aggr_kwargs=aggr_kwargs) - out = conv(x, edge_index) - assert out.size() == (4, 16 * expand) - assert torch.allclose(conv(x, adj.t()), out) - assert torch.allclose(conv(x, torch_adj.t()), out) - - -def test_my_multiple_aggr_conv_jittable(): - x = torch.randn(4, 16) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - - conv = MyMultipleAggrConv() - out = conv(x, edge_index) - - t = '(Tensor, Tensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, edge_index), out) - - t = '(Tensor, SparseTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, adj.t()), out) - - -def test_copy(): - conv = MyConv(8, 32) - conv2 = copy.copy(conv) - - assert conv != conv2 - assert conv.lin_l.weight.tolist() == conv2.lin_l.weight.tolist() - assert conv.lin_r.weight.tolist() == conv2.lin_r.weight.tolist() - assert conv.lin_l.weight.data_ptr == conv2.lin_l.weight.data_ptr - assert conv.lin_r.weight.data_ptr == conv2.lin_r.weight.data_ptr - - conv = copy.deepcopy(conv) - assert conv != conv2 - assert conv.lin_l.weight.tolist() == conv2.lin_l.weight.tolist() - assert conv.lin_r.weight.tolist() == conv2.lin_r.weight.tolist() - assert conv.lin_l.weight.data_ptr != conv2.lin_l.weight.data_ptr - assert conv.lin_r.weight.data_ptr != conv2.lin_r.weight.data_ptr - - -class MyEdgeConv(MessagePassing): - def __init__(self): - super().__init__(aggr='add') - - def forward(self, x: Tensor, edge_index: Adj) -> Tensor: - # edge_updater_type: (x: Tensor) - edge_attr = self.edge_updater(edge_index, x=x) - - # propagate_type: (edge_attr: Tensor) - return self.propagate(edge_index, edge_attr=edge_attr, - size=(x.size(0), x.size(0))) - - def edge_update(self, x_j: Tensor, x_i: Tensor) -> Tensor: - return x_j - x_i - - def message(self, edge_attr: Tensor) -> Tensor: - return edge_attr - - -def test_my_edge_conv(): - x = torch.randn(4, 16) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - torch_adj = adj.to_torch_sparse_csc_tensor() - - expected = scatter(x[row] - x[col], col, dim=0, dim_size=4, reduce='sum') - - conv = MyEdgeConv() - out = conv(x, edge_index) - assert out.size() == (4, 16) - assert torch.allclose(out, expected) - assert torch.allclose(conv(x, adj.t()), out) - assert torch.allclose(conv(x, torch_adj.t()), out) - - -def test_my_edge_conv_jittable(): - x = torch.randn(4, 16) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - - conv = MyEdgeConv() - out = conv(x, edge_index) - - t = '(Tensor, Tensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, edge_index), out) - - t = '(Tensor, SparseTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, adj.t()), out) - - -num_pre_hook_calls = 0 -num_hook_calls = 0 - - -def test_message_passing_hooks(): - conv = MyConv(8, 32) - - x = torch.randn(4, 8) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - value = torch.randn(row.size(0)) - adj = 
SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) - -    def pre_hook(module, inputs): -        assert module == conv -        global num_pre_hook_calls -        num_pre_hook_calls += 1 -        return inputs - -    def hook(module, inputs, output): -        assert module == conv -        global num_hook_calls -        num_hook_calls += 1 -        return output - -    handle1 = conv.register_propagate_forward_pre_hook(pre_hook) -    assert len(conv._propagate_forward_pre_hooks) == 1 -    handle2 = conv.register_propagate_forward_hook(hook) -    assert len(conv._propagate_forward_hooks) == 1 - -    handle3 = conv.register_message_forward_pre_hook(pre_hook) -    assert len(conv._message_forward_pre_hooks) == 1 -    handle4 = conv.register_message_forward_hook(hook) -    assert len(conv._message_forward_hooks) == 1 - -    handle5 = conv.register_aggregate_forward_pre_hook(pre_hook) -    assert len(conv._aggregate_forward_pre_hooks) == 1 -    handle6 = conv.register_aggregate_forward_hook(hook) -    assert len(conv._aggregate_forward_hooks) == 1 - -    handle7 = conv.register_message_and_aggregate_forward_pre_hook(pre_hook) -    assert len(conv._message_and_aggregate_forward_pre_hooks) == 1 -    handle8 = conv.register_message_and_aggregate_forward_hook(hook) -    assert len(conv._message_and_aggregate_forward_hooks) == 1 - -    out1 = conv(x, edge_index, value) -    assert num_pre_hook_calls == 3 -    assert num_hook_calls == 3 -    out2 = conv(x, adj.t()) -    assert num_pre_hook_calls == 5 -    assert num_hook_calls == 5 -    assert torch.allclose(out1, out2) - -    handle1.remove() -    assert len(conv._propagate_forward_pre_hooks) == 0 -    handle2.remove() -    assert len(conv._propagate_forward_hooks) == 0 - -    handle3.remove() -    assert len(conv._message_forward_pre_hooks) == 0 -    handle4.remove() -    assert len(conv._message_forward_hooks) == 0 - -    handle5.remove() -    assert len(conv._aggregate_forward_pre_hooks) == 0 -    handle6.remove() -    assert len(conv._aggregate_forward_hooks) == 0 - -    handle7.remove() -    assert len(conv._message_and_aggregate_forward_pre_hooks) == 0 -    handle8.remove() -    assert len(conv._message_and_aggregate_forward_hooks) == 0 - -    conv = MyEdgeConv() - -    handle1 = conv.register_edge_update_forward_pre_hook(pre_hook) -    assert len(conv._edge_update_forward_pre_hooks) == 1 -    handle2 = conv.register_edge_update_forward_hook(hook) -    assert len(conv._edge_update_forward_hooks) == 1 - -    out1 = conv(x, edge_index) -    assert num_pre_hook_calls == 6 -    assert num_hook_calls == 6 -    out2 = conv(x, adj.t()) -    assert num_pre_hook_calls == 7 -    assert num_hook_calls == 7 -    assert torch.allclose(out1, out2) - -    handle1.remove() -    assert len(conv._edge_update_forward_pre_hooks) == 0 -    handle2.remove() -    assert len(conv._edge_update_forward_hooks) == 0 - - -def test_modified_message_passing_hook(): -    conv = MyConv(8, 32) - -    x = torch.randn(4, 8) -    edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) -    edge_weight = torch.randn(edge_index.size(1)) - -    out1 = conv(x, edge_index, edge_weight) - -    def hook(module, inputs, output): -        assert len(inputs) == 1 -        assert len(inputs[-1]) == 2 -        assert 'x_j' in inputs[-1] -        assert 'edge_weight' in inputs[-1] -        return output + 1.
- - conv.register_message_forward_hook(hook) - - out2 = conv(x, edge_index, edge_weight) - assert not torch.allclose(out1, out2) - - -class MyDefaultArgConv(MessagePassing): - def __init__(self): - super().__init__(aggr='mean') - - # propagate_type: (x: Tensor) - def forward(self, x: Tensor, edge_index: Adj) -> Tensor: - return self.propagate(edge_index, x=x) - - def message(self, x_j, zeros: bool = True): - return x_j * 0 if zeros else x_j - - -def test_my_default_arg_conv(): - x = torch.randn(4, 1) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - torch_adj = adj.to_torch_sparse_csc_tensor() - - conv = MyDefaultArgConv() - assert conv(x, edge_index).view(-1).tolist() == [0, 0, 0, 0] - assert conv(x, adj.t()).view(-1).tolist() == [0, 0, 0, 0] - assert conv(x, torch_adj.t()).view(-1).tolist() == [0, 0, 0, 0] - - -def test_my_default_arg_conv_jittable(): - conv = MyDefaultArgConv() - - with pytest.raises(RuntimeError): # This should not succeed in JIT mode. - torch.jit.script(conv.jittable()) - - -class MyMultipleOutputConv(MessagePassing): - def __init__(self): - super().__init__() - - def forward(self, x: Tensor, edge_index: Tensor) -> Tuple[Tensor, Tensor]: - # propagate_type: (x: Tensor) - return self.propagate(edge_index, x=x, size=None) - - def message(self, x_j: Tensor) -> Tuple[Tensor, Tensor]: - return x_j, x_j - - def aggregate(self, inputs: Tuple[Tensor, Tensor], - index: Tensor) -> Tuple[Tensor, Tensor]: - return (scatter(inputs[0], index, dim=0, reduce='sum'), - scatter(inputs[0], index, dim=0, reduce='mean')) - - def update(self, inputs: Tuple[Tensor, Tensor]) -> Tuple[Tensor, Tensor]: - return inputs - - -def test_tuple_output(): - conv = MyMultipleOutputConv() - - x = torch.randn(4, 8) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - - out1 = conv(x, edge_index) - assert isinstance(out1, tuple) and len(out1) == 2 - - -def test_tuple_output_jittable(): - conv = MyMultipleOutputConv() - - x = torch.randn(4, 8) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - - out1 = conv(x, edge_index) - assert isinstance(out1, tuple) and len(out1) == 2 - - jit = torch.jit.script(conv.jittable()) - out2 = jit(x, edge_index) - assert isinstance(out2, tuple) and len(out2) == 2 - assert torch.allclose(out1[0], out2[0]) - assert torch.allclose(out1[1], out2[1]) - - -class MyExplainConv(MessagePassing): - def __init__(self): - super().__init__(aggr='add') - - def forward(self, x: Tensor, edge_index: Adj) -> Tensor: - return self.propagate(edge_index, x=x) - - -def test_explain_message(): - x = torch.randn(4, 8) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - - conv = MyExplainConv() - assert conv(x, edge_index).abs().sum() != 0. - - conv.explain = True - - with pytest.raises(ValueError, match="pre-defined 'edge_mask'"): - conv(x, edge_index) - - conv._edge_mask = torch.tensor([0, 0, 0, 0], dtype=torch.float) - conv._apply_sigmoid = False - assert conv(x, edge_index).abs().sum() == 0. 
- - class MyAggregatorConv(MessagePassing): -    def __init__(self, **kwargs): -        super().__init__(**kwargs) - -    def forward(self, x: Tensor, edge_index: Adj) -> Tensor: -        # propagate_type: (x: Tensor) -        return self.propagate(edge_index, x=x, size=None) - - -@pytest.mark.parametrize('aggr_module', [ -    aggr.MeanAggregation(), -    aggr.SumAggregation(), -    aggr.MaxAggregation(), -    aggr.SoftmaxAggregation(), -    aggr.PowerMeanAggregation(), -    aggr.MultiAggregation(['mean', 'max']) -]) -def test_message_passing_with_aggr_module(aggr_module): -    x = torch.randn(4, 8) -    edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) -    row, col = edge_index -    adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - -    conv = MyAggregatorConv(aggr=aggr_module) -    assert isinstance(conv.aggr_module, aggr.Aggregation) -    out = conv(x, edge_index) -    assert out.size(0) == 4 and out.size(1) in {8, 16} -    assert torch.allclose(conv(x, adj.t()), out) - - -def test_message_passing_int32_edge_index(): -    # Check that we can dispatch an int32 edge_index up to aggregation -    x = torch.randn(4, 8) -    edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]], dtype=torch.int32) -    edge_weight = torch.randn(edge_index.shape[1]) - -    # Use a hook to promote the edge_index to long to work around the PyTorch -    # CPU backend restriction to int64 for the index. -    def cast_index_hook(module, inputs): -        input_dict = inputs[-1] -        input_dict['index'] = input_dict['index'].long() -        return (input_dict, ) - -    conv = MyConv(8, 32) -    conv.register_aggregate_forward_pre_hook(cast_index_hook) - -    assert conv(x, edge_index, edge_weight).size() == (4, 32) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_mf_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_mf_conv.py deleted file mode 100644 index c3f286c..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_mf_conv.py +++ /dev/null @@ -1,54 +0,0 @@ -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import MFConv -from torch_geometric.testing import is_full_test - - -def test_mf_conv(): -    x1 = torch.randn(4, 8) -    x2 = torch.randn(2, 16) -    edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) -    row, col = edge_index -    adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - -    conv = MFConv(8, 32) -    assert str(conv) == 'MFConv(8, 32)' -    out = conv(x1, edge_index) -    assert out.size() == (4, 32) -    assert conv(x1, edge_index, size=(4, 4)).tolist() == out.tolist() -    assert conv(x1, adj.t()).tolist() == out.tolist() - -    if is_full_test(): -        t = '(Tensor, Tensor, Size) -> Tensor' -        jit = torch.jit.script(conv.jittable(t)) -        assert jit(x1, edge_index).tolist() == out.tolist() -        assert jit(x1, edge_index, size=(4, 4)).tolist() == out.tolist() - -        t = '(Tensor, SparseTensor, Size) -> Tensor' -        jit = torch.jit.script(conv.jittable(t)) -        assert jit(x1, adj.t()).tolist() == out.tolist() - -    adj = adj.sparse_resize((4, 2)) -    conv = MFConv((8, 16), 32) -    assert str(conv) == 'MFConv((8, 16), 32)' -    out1 = conv((x1, x2), edge_index) -    out2 = conv((x1, None), edge_index, (4, 2)) -    assert out1.size() == (2, 32) -    assert out2.size() == (2, 32) -    assert conv((x1, x2), edge_index, (4, 2)).tolist() == out1.tolist() -    assert conv((x1, x2), adj.t()).tolist() == out1.tolist() -    assert conv((x1, None), adj.t()).tolist() == out2.tolist() - -    if is_full_test(): -        t = '(OptPairTensor, Tensor, Size) -> Tensor' -        jit = torch.jit.script(conv.jittable(t)) -        assert jit((x1, x2), edge_index).tolist() == out1.tolist() -        assert jit((x1, x2), edge_index, size=(4, 2)).tolist() == out1.tolist() -        assert
jit((x1, None), edge_index, - size=(4, 2)).tolist() == out2.tolist() - - t = '(OptPairTensor, SparseTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit((x1, x2), adj.t()).tolist() == out1.tolist() - assert jit((x1, None), adj.t()).tolist() == out2.tolist() diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_nn_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_nn_conv.py deleted file mode 100644 index dac4195..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_nn_conv.py +++ /dev/null @@ -1,77 +0,0 @@ -import torch -from torch.nn import Linear as Lin -from torch.nn import ReLU -from torch.nn import Sequential as Seq -from torch_sparse import SparseTensor - -from torch_geometric.nn import NNConv -from torch_geometric.testing import is_full_test - - -def test_nn_conv(): - x1 = torch.randn(4, 8) - x2 = torch.randn(2, 16) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - value = torch.rand(row.size(0), 3) - adj1 = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_coo_tensor() - - nn = Seq(Lin(3, 32), ReLU(), Lin(32, 8 * 32)) - conv = NNConv(8, 32, nn=nn) - assert str(conv) == ( - 'NNConv(8, 32, aggr=add, nn=Sequential(\n' - ' (0): Linear(in_features=3, out_features=32, bias=True)\n' - ' (1): ReLU()\n' - ' (2): Linear(in_features=32, out_features=256, bias=True)\n' - '))') - out = conv(x1, edge_index, value) - assert out.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, value, size=(4, 4)), out) - assert torch.allclose(conv(x1, adj1.t()), out) - assert torch.allclose(conv(x1, adj2.transpose(0, 1).coalesce()), out) - - if is_full_test(): - t = '(Tensor, Tensor, OptTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, edge_index, value), out) - assert torch.allclose(jit(x1, edge_index, value, size=(4, 4)), out) - - t = '(Tensor, SparseTensor, OptTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, adj1.t()), out) - - adj1 = adj1.sparse_resize((4, 2)) - adj2 = adj1.to_torch_sparse_coo_tensor() - conv = NNConv((8, 16), 32, nn=nn) - assert str(conv) == ( - 'NNConv((8, 16), 32, aggr=add, nn=Sequential(\n' - ' (0): Linear(in_features=3, out_features=32, bias=True)\n' - ' (1): ReLU()\n' - ' (2): Linear(in_features=32, out_features=256, bias=True)\n' - '))') - out1 = conv((x1, x2), edge_index, value) - out2 = conv((x1, None), edge_index, value, (4, 2)) - assert out1.size() == (2, 32) - assert out2.size() == (2, 32) - assert torch.allclose(conv((x1, x2), edge_index, value, (4, 2)), out1) - assert torch.allclose(conv((x1, x2), adj1.t()), out1) - assert torch.allclose(conv((x1, x2), - adj2.transpose(0, 1).coalesce()), out1) - assert torch.allclose(conv((x1, None), adj1.t()), out2) - assert torch.allclose(conv((x1, None), - adj2.transpose(0, 1).coalesce()), out2) - - if is_full_test(): - t = '(OptPairTensor, Tensor, OptTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), edge_index, value), out1) - assert torch.allclose(jit((x1, x2), edge_index, value, size=(4, 2)), - out1) - assert torch.allclose(jit((x1, None), edge_index, value, size=(4, 2)), - out2) - - t = '(OptPairTensor, SparseTensor, OptTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), adj1.t()), out1) - assert torch.allclose(jit((x1, None), adj1.t()), out2) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_pan_conv.py 
b/pytorch_geometric-2.3.1/test/nn/conv/test_pan_conv.py deleted file mode 100644 index 3858f14..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_pan_conv.py +++ /dev/null @@ -1,25 +0,0 @@ -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import PANConv - - -def test_pan_conv(): - x = torch.randn(4, 16) - edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() - - conv = PANConv(16, 32, filter_size=2) - assert str(conv) == 'PANConv(16, 32, filter_size=2)' - out1, M1 = conv(x, edge_index) - assert out1.size() == (4, 32) - - out2, M2 = conv(x, adj1.t()) - assert torch.allclose(out1, out2, atol=1e-6) - assert torch.allclose(M1.to_dense(), M2.to_dense()) - - out3, M3 = conv(x, adj2.t()) - assert torch.allclose(out1, out3, atol=1e-6) - assert torch.allclose(M1.to_dense(), M3.to_dense()) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_pdn_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_pdn_conv.py deleted file mode 100644 index 0bfc744..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_pdn_conv.py +++ /dev/null @@ -1,43 +0,0 @@ -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import PDNConv -from torch_geometric.testing import is_full_test - - -def test_pdn_conv(): - x = torch.randn(4, 16) - edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - row, col = edge_index - edge_attr = torch.randn(6, 8) - adj = SparseTensor(row=row, col=col, value=edge_attr, sparse_sizes=(4, 4)) - - conv = PDNConv(16, 32, edge_dim=8, hidden_channels=128) - assert str(conv) == "PDNConv(16, 32)" - out = conv(x, edge_index, edge_attr) - assert out.size() == (4, 32) - assert torch.allclose(conv(x, adj.t()), out, atol=1e-6) - - if is_full_test(): - t = '(Tensor, Tensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, edge_index, edge_attr), out) - - t = '(Tensor, SparseTensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, adj.t()), out, atol=1e-6) - - -def test_pdn_conv_with_sparse_node_input_feature(): - x = torch.sparse_coo_tensor( - indices=torch.tensor([[0, 0], [0, 1]]), - values=torch.tensor([1.0, 1.0]), - size=torch.Size([4, 16]), - ) - edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - edge_attr = torch.randn(6, 8) - - conv = PDNConv(16, 32, edge_dim=8, hidden_channels=128) - - out = conv(x, edge_index, edge_attr) - assert out.size() == (4, 32) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_pna_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_pna_conv.py deleted file mode 100644 index 74ba284..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_pna_conv.py +++ /dev/null @@ -1,73 +0,0 @@ -import pytest -import torch -from torch_sparse import SparseTensor - -from torch_geometric.data import Data -from torch_geometric.loader import DataLoader, NeighborLoader -from torch_geometric.nn import PNAConv -from torch_geometric.testing import is_full_test - -aggregators = ['sum', 'mean', 'min', 'max', 'var', 'std'] -scalers = [ - 'identity', 'amplification', 'attenuation', 'linear', 'inverse_linear' -] - - -@pytest.mark.parametrize('divide_input', [True, False]) -def test_pna_conv(divide_input): - x = torch.randn(4, 16) - edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - deg = torch.tensor([0, 3, 0, 1]) - row, col = edge_index - value = 
torch.rand(row.size(0), 3) - adj = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) - conv = PNAConv(16, 32, aggregators, scalers, deg=deg, edge_dim=3, towers=4, - pre_layers=2, post_layers=2, divide_input=divide_input) - assert str(conv) == 'PNAConv(16, 32, towers=4, edge_dim=3)' - out = conv(x, edge_index, value) - assert out.size() == (4, 32) - assert torch.allclose(conv(x, adj.t()), out, atol=1e-6) - - if is_full_test(): - t = '(Tensor, Tensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, edge_index, value), out, atol=1e-6) - - t = '(Tensor, SparseTensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, adj.t()), out, atol=1e-6) - - -def test_pna_conv_get_degree_histogram(): - edge_index = torch.tensor([[0, 0, 0, 1, 1, 2, 3], [1, 2, 3, 2, 0, 0, 0]]) - data = Data(num_nodes=5, edge_index=edge_index) - loader = NeighborLoader( - data, - num_neighbors=[-1], - input_nodes=None, - batch_size=5, - shuffle=False, - ) - deg_hist = PNAConv.get_degree_histogram(loader) - deg_hist_ref = torch.tensor([1, 2, 1, 1]) - assert torch.equal(deg_hist_ref, deg_hist) - - edge_index_1 = torch.tensor([[0, 0, 0, 1, 1, 2, 3], [1, 2, 3, 2, 0, 0, 0]]) - edge_index_2 = torch.tensor([[1, 1, 2, 2, 0, 3, 3], [2, 3, 3, 1, 1, 0, 2]]) - edge_index_3 = torch.tensor([[1, 3, 2, 0, 0, 4, 2], [2, 0, 4, 1, 1, 0, 3]]) - edge_index_4 = torch.tensor([[0, 1, 2, 4, 0, 1, 3], [2, 3, 3, 1, 1, 0, 2]]) - - data_1 = Data(num_nodes=5, - edge_index=edge_index_1) # deg_hist = [1, 2 ,1 ,1] - data_2 = Data(num_nodes=5, edge_index=edge_index_2) # deg_hist = [1, 1, 3] - data_3 = Data(num_nodes=5, edge_index=edge_index_3) # deg_hist = [0, 3, 2] - data_4 = Data(num_nodes=5, edge_index=edge_index_4) # deg_hist = [1, 1, 3] - - loader = DataLoader( - [data_1, data_2, data_3, data_4], - batch_size=1, - shuffle=False, - ) - deg_hist = PNAConv.get_degree_histogram(loader) - deg_hist_ref = torch.tensor([3, 7, 9, 1]) - assert torch.equal(deg_hist_ref, deg_hist) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_point_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_point_conv.py deleted file mode 100644 index f83d9df..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_point_conv.py +++ /dev/null @@ -1,66 +0,0 @@ -import torch -from torch.nn import Linear as Lin -from torch.nn import ReLU -from torch.nn import Sequential as Seq -from torch_sparse import SparseTensor - -from torch_geometric.nn import PointNetConv -from torch_geometric.testing import is_full_test - - -def test_point_net_conv(): - x1 = torch.randn(4, 16) - pos1 = torch.randn(4, 3) - pos2 = torch.randn(2, 3) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() - - local_nn = Seq(Lin(16 + 3, 32), ReLU(), Lin(32, 32)) - global_nn = Seq(Lin(32, 32)) - conv = PointNetConv(local_nn, global_nn) - assert str(conv) == ( - 'PointNetConv(local_nn=Sequential(\n' - ' (0): Linear(in_features=19, out_features=32, bias=True)\n' - ' (1): ReLU()\n' - ' (2): Linear(in_features=32, out_features=32, bias=True)\n' - '), global_nn=Sequential(\n' - ' (0): Linear(in_features=32, out_features=32, bias=True)\n' - '))') - out = conv(x1, pos1, edge_index) - assert out.size() == (4, 32) - assert torch.allclose(conv(x1, pos1, adj1.t()), out, atol=1e-6) - assert torch.allclose(conv(x1, pos1, adj2.t()), out, atol=1e-6) - - if is_full_test(): - t = '(OptTensor, 
Tensor, Tensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit(x1, pos1, edge_index).tolist() == out.tolist() - - t = '(OptTensor, Tensor, SparseTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, pos1, adj1.t()), out, atol=1e-6) - - adj1 = adj1.sparse_resize((4, 2)) - adj2 = adj1.to_torch_sparse_csc_tensor() - out = conv(x1, (pos1, pos2), edge_index) - assert out.size() == (2, 32) - assert conv((x1, None), (pos1, pos2), edge_index).tolist() == out.tolist() - assert torch.allclose(conv(x1, (pos1, pos2), adj1.t()), out, atol=1e-6) - assert torch.allclose(conv(x1, (pos1, pos2), adj2.t()), out, atol=1e-6) - assert torch.allclose(conv((x1, None), (pos1, pos2), adj1.t()), out, - atol=1e-6) - assert torch.allclose(conv((x1, None), (pos1, pos2), adj2.t()), out, - atol=1e-6) - - if is_full_test(): - t = '(PairOptTensor, PairTensor, Tensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit((x1, None), (pos1, pos2), - edge_index).tolist() == out.tolist() - - t = '(PairOptTensor, PairTensor, SparseTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, None), (pos1, pos2), adj1.t()), out, - atol=1e-6) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_point_gnn_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_point_gnn_conv.py deleted file mode 100644 index a007993..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_point_gnn_conv.py +++ /dev/null @@ -1,39 +0,0 @@ -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import MLP, PointGNNConv -from torch_geometric.testing import is_full_test - - -def test_point_gnn_conv(): - x = torch.randn(6, 8) - pos = torch.randn(6, 3) - edge_index = torch.tensor([[0, 1, 1, 1, 2, 5], [1, 2, 3, 4, 3, 4]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(6, 6)) - adj2 = adj1.to_torch_sparse_csc_tensor() - - conv = PointGNNConv( - mlp_h=MLP([8, 16, 3]), - mlp_f=MLP([3 + 8, 16, 8]), - mlp_g=MLP([8, 16, 8]), - ) - assert str(conv) == ('PointGNNConv(\n' - ' mlp_h=MLP(8, 16, 3),\n' - ' mlp_f=MLP(11, 16, 8),\n' - ' mlp_g=MLP(8, 16, 8),\n' - ')') - - out = conv(x, pos, edge_index) - assert out.size() == (6, 8) - assert torch.allclose(conv(x, pos, adj1.t()), out) - assert torch.allclose(conv(x, pos, adj2.t()), out) - - if is_full_test(): - t = '(Tensor, Tensor, Tensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, pos, edge_index), out) - - t = '(Tensor, Tensor, SparseTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, pos, adj1.t()), out) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_point_transformer_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_point_transformer_conv.py deleted file mode 100644 index bd8d115..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_point_transformer_conv.py +++ /dev/null @@ -1,65 +0,0 @@ -import torch -from torch.nn import Linear, ReLU, Sequential -from torch_sparse import SparseTensor - -from torch_geometric.nn import PointTransformerConv -from torch_geometric.testing import is_full_test - - -def test_point_transformer_conv(): - x1 = torch.rand(4, 16) - x2 = torch.randn(2, 8) - pos1 = torch.rand(4, 3) - pos2 = torch.randn(2, 3) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() - - conv = PointTransformerConv(in_channels=16, 
out_channels=32) - assert str(conv) == 'PointTransformerConv(16, 32)' - - out = conv(x1, pos1, edge_index) - assert out.size() == (4, 32) - assert torch.allclose(conv(x1, pos1, adj1.t()), out, atol=1e-6) - assert torch.allclose(conv(x1, pos1, adj2.t()), out, atol=1e-6) - - if is_full_test(): - t = '(Tensor, Tensor, Tensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, pos1, edge_index), out, atol=1e-6) - - t = '(Tensor, Tensor, SparseTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, pos1, adj1.t()), out, atol=1e-6) - - pos_nn = Sequential(Linear(3, 16), ReLU(), Linear(16, 32)) - attn_nn = Sequential(Linear(32, 32), ReLU(), Linear(32, 32)) - conv = PointTransformerConv(16, 32, pos_nn, attn_nn) - - out = conv(x1, pos1, edge_index) - assert out.size() == (4, 32) - assert torch.allclose(conv(x1, pos1, adj1.t()), out, atol=1e-6) - assert torch.allclose(conv(x1, pos1, adj2.t()), out, atol=1e-6) - - conv = PointTransformerConv((16, 8), 32) - adj1 = adj1.sparse_resize((4, 2)) - adj2 = adj1.to_torch_sparse_csc_tensor() - - out = conv((x1, x2), (pos1, pos2), edge_index) - assert out.size() == (2, 32) - assert torch.allclose(conv((x1, x2), (pos1, pos2), adj1.t()), out, - atol=1e-6) - assert torch.allclose(conv((x1, x2), (pos1, pos2), adj2.t()), out, - atol=1e-6) - - if is_full_test(): - t = '(PairTensor, PairTensor, Tensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), (pos1, pos2), edge_index), out, - atol=1e-6) - - t = '(PairTensor, PairTensor, SparseTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), (pos1, pos2), adj1.t()), out, - atol=1e-6) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_ppf_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_ppf_conv.py deleted file mode 100644 index fc1c585..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_ppf_conv.py +++ /dev/null @@ -1,75 +0,0 @@ -import torch -import torch.nn.functional as F -from torch.nn import Linear as Lin -from torch.nn import ReLU -from torch.nn import Sequential as Seq -from torch_sparse import SparseTensor - -from torch_geometric.nn import PPFConv -from torch_geometric.testing import is_full_test - - -def test_ppf_conv(): - x1 = torch.randn(4, 16) - pos1 = torch.randn(4, 3) - pos2 = torch.randn(2, 3) - n1 = F.normalize(torch.rand(4, 3), dim=-1) - n2 = F.normalize(torch.rand(2, 3), dim=-1) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() - - local_nn = Seq(Lin(16 + 4, 32), ReLU(), Lin(32, 32)) - global_nn = Seq(Lin(32, 32)) - conv = PPFConv(local_nn, global_nn) - assert str(conv) == ( - 'PPFConv(local_nn=Sequential(\n' - ' (0): Linear(in_features=20, out_features=32, bias=True)\n' - ' (1): ReLU()\n' - ' (2): Linear(in_features=32, out_features=32, bias=True)\n' - '), global_nn=Sequential(\n' - ' (0): Linear(in_features=32, out_features=32, bias=True)\n' - '))') - out = conv(x1, pos1, n1, edge_index) - assert out.size() == (4, 32) - assert torch.allclose(conv(x1, pos1, n1, adj1.t()), out, atol=1e-6) - assert torch.allclose(conv(x1, pos1, n1, adj2.t()), out, atol=1e-6) - - if is_full_test(): - t = '(OptTensor, Tensor, Tensor, Tensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, pos1, n1, edge_index), out, atol=1e-6) - - t = '(OptTensor, Tensor, Tensor, SparseTensor) -> 
Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, pos1, n1, adj1.t()), out, atol=1e-6) - - adj1 = adj1.sparse_resize((4, 2)) - adj2 = adj1.to_torch_sparse_csc_tensor() - out = conv(x1, (pos1, pos2), (n1, n2), edge_index) - assert out.size() == (2, 32) - assert torch.allclose(conv((x1, None), (pos1, pos2), (n1, n2), edge_index), - out, atol=1e-6) - assert torch.allclose(conv(x1, (pos1, pos2), (n1, n2), adj1.t()), out, - atol=1e-6) - assert torch.allclose(conv(x1, (pos1, pos2), (n1, n2), adj2.t()), out, - atol=1e-6) - assert torch.allclose(conv((x1, None), (pos1, pos2), (n1, n2), adj1.t()), - out, atol=1e-6) - assert torch.allclose(conv((x1, None), (pos1, pos2), (n1, n2), adj2.t()), - out, atol=1e-6) - - if is_full_test(): - t = '(PairOptTensor, PairTensor, PairTensor, Tensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose( - jit((x1, None), (pos1, pos2), (n1, n2), edge_index), - out, - atol=1e-6, - ) - - t = '(PairOptTensor, PairTensor, PairTensor, SparseTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose( - jit((x1, None), (pos1, pos2), (n1, n2), adj1.t()), out, atol=1e-6) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_res_gated_graph_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_res_gated_graph_conv.py deleted file mode 100644 index eb71900..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_res_gated_graph_conv.py +++ /dev/null @@ -1,48 +0,0 @@ -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import ResGatedGraphConv -from torch_geometric.testing import is_full_test - - -def test_res_gated_graph_conv(): - x1 = torch.randn(4, 8) - x2 = torch.randn(2, 32) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() - - conv = ResGatedGraphConv(8, 32) - assert str(conv) == 'ResGatedGraphConv(8, 32)' - out = conv(x1, edge_index) - assert out.size() == (4, 32) - assert torch.allclose(conv(x1, adj1.t()), out, atol=1e-6) - assert torch.allclose(conv(x1, adj2.t()), out, atol=1e-6) - - if is_full_test(): - t = '(Tensor, Tensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, edge_index), out, atol=1e-6) - - t = '(Tensor, SparseTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, adj1.t()), out, atol=1e-6) - - adj1 = adj1.sparse_resize((4, 2)) - adj2 = adj1.to_torch_sparse_csc_tensor() - conv = ResGatedGraphConv((8, 32), 32) - assert str(conv) == 'ResGatedGraphConv((8, 32), 32)' - out = conv((x1, x2), edge_index) - assert out.size() == (2, 32) - assert torch.allclose(conv((x1, x2), adj1.t()), out, atol=1e-6) - assert torch.allclose(conv((x1, x2), adj2.t()), out, atol=1e-6) - - if is_full_test(): - t = '(PairTensor, Tensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), edge_index), out, atol=1e-6) - - t = '(PairTensor, SparseTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), adj1.t()), out, atol=1e-6) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_rgat_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_rgat_conv.py deleted file mode 100644 index b3c1afa..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_rgat_conv.py +++ /dev/null @@ -1,63 +0,0 @@ -import pytest -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn 
import RGATConv -from torch_geometric.testing import is_full_test - - -@pytest.mark.parametrize('mod', [ - 'additive', - 'scaled', - 'f-additive', - 'f-scaled', -]) -@pytest.mark.parametrize('attention_mechanism', [ - 'within-relation', - 'across-relation', -]) -@pytest.mark.parametrize('attention_mode', [ - 'additive-self-attention', - 'multiplicative-self-attention', -]) -def test_rgat_conv(mod, attention_mechanism, attention_mode): - x = torch.randn(4, 8) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - edge_type = torch.tensor([0, 2, 1, 2]) - edge_attr = torch.randn((4, 8)) - - conv = RGATConv(8, 20, num_relations=4, num_bases=4, mod=mod, - attention_mechanism=attention_mechanism, - attention_mode=attention_mode, heads=2, dim=1, edge_dim=8) - assert str(conv) == 'RGATConv(8, 20, heads=2)' - - out = conv(x, edge_index, edge_type, edge_attr) - assert out.size() == (4, 40) - - -def test_rgat_conv_jittable(): - x = torch.randn(4, 8) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - edge_attr = torch.randn((4, 8)) - adj1 = SparseTensor(row=row, col=col, value=edge_attr, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_coo_tensor() - edge_type = torch.tensor([0, 2, 1, 2]) - - conv = RGATConv(8, 20, num_relations=4, num_bases=4, mod='additive', - attention_mechanism='across-relation', - attention_mode='additive-self-attention', heads=2, dim=1, - edge_dim=8, bias=False) - - out = conv(x, edge_index, edge_type, edge_attr) - assert out.size() == (4, 40) - assert torch.allclose(conv(x, adj1.t(), edge_type), out) - # t() expects a tensor with <= 2 sparse and 0 dense dimensions - adj2_t = adj2.transpose(0, 1).coalesce() - assert torch.allclose(conv(x, adj2_t, edge_type), out) - - if is_full_test(): - t = '(Tensor, Tensor, OptTensor, OptTensor, Size, NoneType) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, edge_index, edge_type), - conv(x, edge_index, edge_type)) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_rgcn_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_rgcn_conv.py deleted file mode 100644 index 8bfd6b8..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_rgcn_conv.py +++ /dev/null @@ -1,111 +0,0 @@ -import pytest -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import FastRGCNConv, RGCNConv -from torch_geometric.testing import is_full_test - -classes = [RGCNConv, FastRGCNConv] -confs = [(None, None), (2, None), (None, 2)] - - -@pytest.mark.parametrize('conf', confs) -def test_rgcn_conv_equality(conf): - num_bases, num_blocks = conf - - x1 = torch.randn(4, 4) - edge_index = torch.tensor([[0, 1, 1, 2, 2, 3], [0, 0, 1, 0, 1, 1]]) - edge_type = torch.tensor([0, 1, 1, 0, 0, 1]) - - edge_index = torch.tensor([ - [0, 1, 1, 2, 2, 3, 0, 1, 1, 2, 2, 3], - [0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1], - ]) - edge_type = torch.tensor([0, 1, 1, 0, 0, 1, 2, 3, 3, 2, 2, 3]) - - torch.manual_seed(12345) - conv1 = RGCNConv(4, 32, 4, num_bases, num_blocks, aggr='sum') - - torch.manual_seed(12345) - conv2 = FastRGCNConv(4, 32, 4, num_bases, num_blocks, aggr='sum') - - out1 = conv1(x1, edge_index, edge_type) - out2 = conv2(x1, edge_index, edge_type) - assert torch.allclose(out1, out2, atol=1e-6) - - if num_blocks is None: - out1 = conv1(None, edge_index, edge_type) - out2 = conv2(None, edge_index, edge_type) - assert torch.allclose(out1, out2, atol=1e-6) - - -@pytest.mark.parametrize('cls', classes) -@pytest.mark.parametrize('conf', confs) -def test_rgcn_conv(cls, conf): - num_bases, 
num_blocks = conf - - x1 = torch.randn(4, 4) - x2 = torch.randn(2, 16) - idx1 = torch.arange(4) - idx2 = torch.arange(2) - edge_index = torch.tensor([[0, 1, 1, 2, 2, 3], [0, 0, 1, 0, 1, 1]]) - edge_type = torch.tensor([0, 1, 1, 0, 0, 1]) - row, col = edge_index - adj = SparseTensor(row=row, col=col, value=edge_type, sparse_sizes=(4, 4)) - - conv = cls(4, 32, 2, num_bases, num_blocks, aggr='sum') - assert str(conv) == f'{cls.__name__}(4, 32, num_relations=2)' - out1 = conv(x1, edge_index, edge_type) - assert out1.size() == (4, 32) - assert torch.allclose(conv(x1, adj.t()), out1, atol=1e-6) - - if num_blocks is None: - out2 = conv(None, edge_index, edge_type) - assert out2.size() == (4, 32) - assert torch.allclose(conv(None, adj.t()), out2, atol=1e-6) - - if is_full_test(): - t = '(OptTensor, Tensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, edge_index, edge_type), out1) - if num_blocks is None: - assert torch.allclose(jit(idx1, edge_index, edge_type), out2) - assert torch.allclose(jit(None, edge_index, edge_type), out2) - - t = '(OptTensor, SparseTensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, adj.t()), out1) - if num_blocks is None: - assert torch.allclose(jit(idx1, adj.t()), out2, atol=1e-6) - assert torch.allclose(jit(None, adj.t()), out2, atol=1e-6) - - adj = adj.sparse_resize((4, 2)) - conv = cls((4, 16), 32, 2, num_bases, num_blocks, aggr='sum') - assert str(conv) == f'{cls.__name__}((4, 16), 32, num_relations=2)' - out1 = conv((x1, x2), edge_index, edge_type) - assert out1.size() == (2, 32) - assert torch.allclose(conv((x1, x2), adj.t()), out1, atol=1e-6) - - if num_blocks is None: - out2 = conv((None, idx2), edge_index, edge_type) - assert out2.size() == (2, 32) - assert torch.allclose(conv((idx1, idx2), edge_index, edge_type), out2) - assert torch.allclose(conv((None, idx2), adj.t()), out2, atol=1e-6) - assert torch.allclose(conv((idx1, idx2), adj.t()), out2, atol=1e-6) - - if is_full_test(): - t = '(Tuple[OptTensor, Tensor], Tensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), edge_index, edge_type), out1) - if num_blocks is None: - assert torch.allclose(jit((None, idx2), edge_index, edge_type), - out2) - assert torch.allclose(jit((idx1, idx2), edge_index, edge_type), - out2) - - t = '(Tuple[OptTensor, Tensor], SparseTensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), adj.t()), out1, atol=1e-6) - if num_blocks is None: - assert torch.allclose(jit((None, idx2), adj.t()), out2, atol=1e-6) - assert torch.allclose(jit((idx1, idx2), adj.t()), out2, atol=1e-6) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_sage_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_sage_conv.py deleted file mode 100644 index 3463dd4..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_sage_conv.py +++ /dev/null @@ -1,100 +0,0 @@ -import pytest -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import SAGEConv -from torch_geometric.testing import is_full_test - - -@pytest.mark.parametrize('project', [False, True]) -@pytest.mark.parametrize('aggr', ['mean', 'sum']) -def test_sage_conv(project, aggr): - x1 = torch.randn(4, 8) - x2 = torch.randn(2, 16) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() - - conv = 
SAGEConv(8, 32, project=project, aggr=aggr) - assert str(conv) == f'SAGEConv(8, 32, aggr={aggr})' - out = conv(x1, edge_index) - assert out.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, size=(4, 4)), out, atol=1e-6) - assert torch.allclose(conv(x1, adj1.t()), out, atol=1e-6) - assert torch.allclose(conv(x1, adj2.t()), out, atol=1e-6) - - if is_full_test(): - t = '(Tensor, Tensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, edge_index), out, atol=1e-6) - assert torch.allclose(jit(x1, edge_index, size=(4, 4)), out, atol=1e-6) - - t = '(Tensor, SparseTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, adj1.t()), out, atol=1e-6) - - adj1 = adj1.sparse_resize((4, 2)) - adj2 = adj1.to_torch_sparse_csc_tensor() - conv = SAGEConv((8, 16), 32, project=project, aggr=aggr) - assert str(conv) == f'SAGEConv((8, 16), 32, aggr={aggr})' - out1 = conv((x1, x2), edge_index) - out2 = conv((x1, None), edge_index, (4, 2)) - assert out1.size() == (2, 32) - assert out2.size() == (2, 32) - assert torch.allclose(conv((x1, x2), edge_index, (4, 2)), out1, atol=1e-6) - assert torch.allclose(conv((x1, x2), adj1.t()), out1, atol=1e-6) - assert torch.allclose(conv((x1, None), adj1.t()), out2, atol=1e-6) - assert torch.allclose(conv((x1, x2), adj2.t()), out1, atol=1e-6) - assert torch.allclose(conv((x1, None), adj2.t()), out2, atol=1e-6) - - if is_full_test(): - t = '(OptPairTensor, Tensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), edge_index), out1, atol=1e-6) - assert torch.allclose(jit((x1, x2), edge_index, size=(4, 2)), out1, - atol=1e-6) - assert torch.allclose(jit((x1, None), edge_index, size=(4, 2)), out2, - atol=1e-6) - - t = '(OptPairTensor, SparseTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), adj1.t()), out1, atol=1e-6) - assert torch.allclose(jit((x1, None), adj1.t()), out2, atol=1e-6) - - -def test_lstm_aggr_sage_conv(): - x = torch.randn(4, 8) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - - conv = SAGEConv(8, 32, aggr='lstm') - assert str(conv) == 'SAGEConv(8, 32, aggr=lstm)' - out = conv(x, edge_index) - assert out.size() == (4, 32) - assert torch.allclose(conv(x, adj.t()), out) - - edge_index = torch.tensor([[0, 1, 2, 3], [1, 0, 1, 0]]) - with pytest.raises(ValueError, match="'index' tensor is not sorted"): - conv(x, edge_index) - - -@pytest.mark.parametrize('aggr_kwargs', [ - dict(mode='cat'), - dict(mode='proj', mode_kwargs=dict(in_channels=8, out_channels=16)), - dict(mode='attn', mode_kwargs=dict(in_channels=8, out_channels=16, - num_heads=4)), - dict(mode='sum'), -]) -def test_multi_aggr_sage_conv(aggr_kwargs): - x = torch.randn(4, 8) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - aggr_kwargs['aggrs_kwargs'] = [{}, {}, {}, dict(learn=True, t=1)] - conv = SAGEConv(8, 32, aggr=['mean', 'max', 'sum', 'softmax'], - aggr_kwargs=aggr_kwargs) - out = conv(x, edge_index) - assert out.size() == (4, 32) - assert torch.allclose(conv(x, adj.t()), out) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_sg_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_sg_conv.py deleted file mode 100644 index d35451d..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_sg_conv.py +++ /dev/null @@ -1,44 
+0,0 @@ -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import SGConv -from torch_geometric.testing import is_full_test - - -def test_sg_conv(): - x = torch.randn(4, 16) - edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - row, col = edge_index - value = torch.rand(row.size(0)) - adj2 = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) - adj1 = adj2.set_value(None) - adj3 = adj1.to_torch_sparse_csc_tensor() - adj4 = adj2.to_torch_sparse_csc_tensor() - - conv = SGConv(16, 32, K=10) - assert str(conv) == 'SGConv(16, 32, K=10)' - out1 = conv(x, edge_index) - assert out1.size() == (4, 32) - assert torch.allclose(conv(x, adj1.t()), out1, atol=1e-6) - assert torch.allclose(conv(x, adj3.t()), out1, atol=1e-6) - out2 = conv(x, edge_index, value) - assert out2.size() == (4, 32) - assert torch.allclose(conv(x, adj2.t()), out2, atol=1e-6) - assert torch.allclose(conv(x, adj4.t()), out2, atol=1e-6) - - if is_full_test(): - t = '(Tensor, Tensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit(x, edge_index).tolist() == out1.tolist() - assert jit(x, edge_index, value).tolist() == out2.tolist() - - t = '(Tensor, SparseTensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, adj1.t()), out1, atol=1e-6) - assert torch.allclose(jit(x, adj2.t()), out2, atol=1e-6) - - conv.cached = True - conv(x, edge_index) - assert conv(x, edge_index).tolist() == out1.tolist() - assert torch.allclose(conv(x, adj1.t()), out1, atol=1e-6) - assert torch.allclose(conv(x, adj3.t()), out1, atol=1e-6) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_signed_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_signed_conv.py deleted file mode 100644 index d4d006e..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_signed_conv.py +++ /dev/null @@ -1,70 +0,0 @@ -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import SignedConv -from torch_geometric.testing import is_full_test - - -def test_signed_conv(): - x = torch.randn(4, 16) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = adj1.to_torch_sparse_csc_tensor() - - conv1 = SignedConv(16, 32, first_aggr=True) - assert str(conv1) == 'SignedConv(16, 32, first_aggr=True)' - - conv2 = SignedConv(32, 48, first_aggr=False) - assert str(conv2) == 'SignedConv(32, 48, first_aggr=False)' - - out1 = conv1(x, edge_index, edge_index) - assert out1.size() == (4, 64) - assert torch.allclose(conv1(x, adj1.t(), adj1.t()), out1) - assert torch.allclose(conv1(x, adj2.t(), adj2.t()), out1) - - out2 = conv2(out1, edge_index, edge_index) - assert out2.size() == (4, 96) - assert torch.allclose(conv2(out1, adj1.t(), adj1.t()), out2) - assert torch.allclose(conv2(out1, adj2.t(), adj2.t()), out2) - - if is_full_test(): - t = '(Tensor, Tensor, Tensor) -> Tensor' - jit1 = torch.jit.script(conv1.jittable(t)) - jit2 = torch.jit.script(conv2.jittable(t)) - assert torch.allclose(jit1(x, edge_index, edge_index), out1) - assert torch.allclose(jit2(out1, edge_index, edge_index), out2) - - t = '(Tensor, SparseTensor, SparseTensor) -> Tensor' - jit1 = torch.jit.script(conv1.jittable(t)) - jit2 = torch.jit.script(conv2.jittable(t)) - assert torch.allclose(jit1(x, adj1.t(), adj1.t()), out1) - assert torch.allclose(jit2(out1, adj1.t(), adj1.t()), out2) - - adj1 = adj1.sparse_resize((4, 2)) - adj2 = adj1.to_torch_sparse_csc_tensor() - assert 
torch.allclose(conv1((x, x[:2]), edge_index, edge_index), out1[:2]) - assert torch.allclose(conv1((x, x[:2]), adj1.t(), adj1.t()), out1[:2]) - assert torch.allclose(conv1((x, x[:2]), adj2.t(), adj2.t()), out1[:2]) - assert torch.allclose(conv2((out1, out1[:2]), edge_index, edge_index), - out2[:2], atol=1e-6) - assert torch.allclose(conv2((out1, out1[:2]), adj1.t(), adj1.t()), - out2[:2], atol=1e-6) - assert torch.allclose(conv2((out1, out1[:2]), adj2.t(), adj2.t()), - out2[:2], atol=1e-6) - - if is_full_test(): - t = '(PairTensor, Tensor, Tensor) -> Tensor' - jit1 = torch.jit.script(conv1.jittable(t)) - jit2 = torch.jit.script(conv2.jittable(t)) - assert torch.allclose(jit1((x, x[:2]), edge_index, edge_index), - out1[:2], atol=1e-6) - assert torch.allclose(jit2((out1, out1[:2]), edge_index, edge_index), - out2[:2], atol=1e-6) - - t = '(PairTensor, SparseTensor, SparseTensor) -> Tensor' - jit1 = torch.jit.script(conv1.jittable(t)) - jit2 = torch.jit.script(conv2.jittable(t)) - assert torch.allclose(jit1((x, x[:2]), adj1.t(), adj1.t()), out1[:2]) - assert torch.allclose(jit2((out1, out1[:2]), adj1.t(), adj1.t()), - out2[:2], atol=1e-6) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_simple_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_simple_conv.py deleted file mode 100644 index 4acad77..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_simple_conv.py +++ /dev/null @@ -1,47 +0,0 @@ -import pytest -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import SimpleConv -from torch_geometric.testing import is_full_test - - -@pytest.mark.parametrize('aggr, combine_root', [ - ('mean', None), - ('sum', 'sum'), - (['mean', 'max'], 'cat'), -]) -def test_simple_conv(aggr, combine_root): - x1 = torch.randn(4, 8) - x2 = torch.randn(2, 8) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - - conv = SimpleConv(aggr, combine_root) - assert str(conv) == 'SimpleConv()' - - num_aggrs = 1 if isinstance(aggr, str) else len(aggr) - output_size = sum([8] * num_aggrs) + (8 if combine_root == 'cat' else 0) - - out = conv(x1, edge_index) - assert out.size() == (4, output_size) - assert torch.allclose(conv(x1, edge_index, size=(4, 4)), out) - assert torch.allclose(conv(x1, adj.t()), out) - - if is_full_test(): - t = '(Tensor, Tensor, OptTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, edge_index), out) - assert torch.allclose(jit(x1, edge_index, size=(4, 4)), out) - - t = '(Tensor, SparseTensor, OptTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, adj.t()), out) - - adj = adj.sparse_resize((4, 2)) - - out = conv((x1, x2), edge_index) - assert out.size() == (2, output_size) - assert torch.allclose(conv((x1, x2), edge_index, size=(4, 2)), out) - assert torch.allclose(conv((x1, x2), adj.t()), out) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_spline_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_spline_conv.py deleted file mode 100644 index 488c2d4..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_spline_conv.py +++ /dev/null @@ -1,81 +0,0 @@ -import warnings - -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import SplineConv -from torch_geometric.testing import is_full_test, withPackage - - -@withPackage('torch_spline_conv') -def test_spline_conv(): - warnings.filterwarnings('ignore', '.*non-optimized CPU version.*') - - x1 = 
torch.randn(4, 8) - x2 = torch.randn(2, 16) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - value = torch.rand(row.size(0), 3) - adj = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) - - conv = SplineConv(8, 32, dim=3, kernel_size=5) - assert str(conv) == 'SplineConv(8, 32, dim=3)' - out = conv(x1, edge_index, value) - assert out.size() == (4, 32) - assert torch.allclose(conv(x1, edge_index, value, size=(4, 4)), out) - assert torch.allclose(conv(x1, adj.t()), out) - - if is_full_test(): - t = '(Tensor, Tensor, OptTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, edge_index, value), out) - assert torch.allclose(jit(x1, edge_index, value, size=(4, 4)), out) - - t = '(Tensor, SparseTensor, OptTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, adj.t()), out) - - adj = adj.sparse_resize((4, 2)) - conv = SplineConv((8, 16), 32, dim=3, kernel_size=5) - assert str(conv) == 'SplineConv((8, 16), 32, dim=3)' - out1 = conv((x1, x2), edge_index, value) - out2 = conv((x1, None), edge_index, value, (4, 2)) - assert out1.size() == (2, 32) - assert out2.size() == (2, 32) - assert torch.allclose(conv((x1, x2), edge_index, value, (4, 2)), out1) - assert torch.allclose(conv((x1, x2), adj.t()), out1) - assert torch.allclose(conv((x1, None), adj.t()), out2) - - if is_full_test(): - t = '(OptPairTensor, Tensor, OptTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), edge_index, value), out1) - assert torch.allclose(jit((x1, x2), edge_index, value, size=(4, 2)), - out1) - assert torch.allclose(jit((x1, None), edge_index, value, size=(4, 2)), - out2) - - t = '(OptPairTensor, SparseTensor, OptTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), adj.t()), out1) - assert torch.allclose(jit((x1, None), adj.t()), out2) - - -@withPackage('torch_spline_conv') -def test_lazy_spline_conv(): - warnings.filterwarnings('ignore', '.*non-optimized CPU version.*') - - x1 = torch.randn(4, 8) - x2 = torch.randn(2, 16) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - value = torch.rand(edge_index.size(1), 3) - - conv = SplineConv(-1, 32, dim=3, kernel_size=5) - assert str(conv) == 'SplineConv(-1, 32, dim=3)' - out = conv(x1, edge_index, value) - assert out.size() == (4, 32) - - conv = SplineConv((-1, -1), 32, dim=3, kernel_size=5) - assert str(conv) == 'SplineConv((-1, -1), 32, dim=3)' - out = conv((x1, x2), edge_index, value) - assert out.size() == (2, 32) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_ssg_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_ssg_conv.py deleted file mode 100644 index f44b0d2..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_ssg_conv.py +++ /dev/null @@ -1,44 +0,0 @@ -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import SSGConv -from torch_geometric.testing import is_full_test - - -def test_ssg_conv(): - x = torch.randn(4, 16) - edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - row, col = edge_index - value = torch.rand(row.size(0)) - adj2 = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) - adj1 = adj2.set_value(None) - adj3 = adj1.to_torch_sparse_csc_tensor() - adj4 = adj2.to_torch_sparse_csc_tensor() - - conv = SSGConv(16, 32, alpha=0.1, K=10) - assert str(conv) == 'SSGConv(16, 32, K=10, alpha=0.1)' - out1 = conv(x, edge_index) - assert out1.size() 
== (4, 32) - assert torch.allclose(conv(x, adj1.t()), out1, atol=1e-6) - assert torch.allclose(conv(x, adj3.t()), out1, atol=1e-6) - out2 = conv(x, edge_index, value) - assert out2.size() == (4, 32) - assert torch.allclose(conv(x, adj2.t()), out2, atol=1e-6) - assert torch.allclose(conv(x, adj4.t()), out2, atol=1e-6) - - if is_full_test(): - t = '(Tensor, Tensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit(x, edge_index).tolist() == out1.tolist() - assert jit(x, edge_index, value).tolist() == out2.tolist() - - t = '(Tensor, SparseTensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, adj1.t()), out1, atol=1e-6) - assert torch.allclose(jit(x, adj2.t()), out2, atol=1e-6) - - conv.cached = True - conv(x, edge_index) - assert conv(x, edge_index).tolist() == out1.tolist() - assert torch.allclose(conv(x, adj1.t()), out1, atol=1e-6) - assert torch.allclose(conv(x, adj3.t()), out1, atol=1e-6) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_tag_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_tag_conv.py deleted file mode 100644 index a74d6c2..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_tag_conv.py +++ /dev/null @@ -1,47 +0,0 @@ -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import TAGConv -from torch_geometric.testing import is_full_test - - -def test_tag_conv(): - x = torch.randn(4, 16) - edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - row, col = edge_index - value = torch.rand(row.size(0)) - adj2 = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4)) - adj1 = adj2.set_value(None) - adj3 = adj1.to_torch_sparse_csc_tensor() - adj4 = adj2.to_torch_sparse_csc_tensor() - - conv = TAGConv(16, 32) - assert str(conv) == 'TAGConv(16, 32, K=3)' - out1 = conv(x, edge_index) - assert out1.size() == (4, 32) - assert torch.allclose(conv(x, adj1.t()), out1, atol=1e-6) - assert torch.allclose(conv(x, adj3.t()), out1, atol=1e-6) - out2 = conv(x, edge_index, value) - assert out2.size() == (4, 32) - assert torch.allclose(conv(x, adj2.t()), out2, atol=1e-6) - assert torch.allclose(conv(x, adj4.t()), out2, atol=1e-6) - - if is_full_test(): - t = '(Tensor, Tensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert jit(x, edge_index).tolist() == out1.tolist() - assert jit(x, edge_index, value).tolist() == out2.tolist() - - t = '(Tensor, SparseTensor, OptTensor) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, adj1.t()), out1, atol=1e-6) - assert torch.allclose(jit(x, adj2.t()), out2, atol=1e-6) - - -def test_static_tag_conv(): - x = torch.randn(3, 4, 16) - edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - - conv = TAGConv(16, 32) - out = conv(x, edge_index) - assert out.size() == (3, 4, 32) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_transformer_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_transformer_conv.py deleted file mode 100644 index 48ee0a3..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_transformer_conv.py +++ /dev/null @@ -1,81 +0,0 @@ -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import TransformerConv -from torch_geometric.testing import is_full_test - - -def test_transformer_conv(): - x1 = torch.randn(4, 8) - x2 = torch.randn(2, 16) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - row, col = edge_index - adj1 = SparseTensor(row=row, col=col, sparse_sizes=(4, 4)) - adj2 = 
adj1.to_torch_sparse_csc_tensor() - - conv = TransformerConv(8, 32, heads=2, beta=True) - assert str(conv) == 'TransformerConv(8, 32, heads=2)' - out = conv(x1, edge_index) - assert out.size() == (4, 64) - assert torch.allclose(conv(x1, adj1.t()), out, atol=1e-6) - assert torch.allclose(conv(x1, adj2.t()), out, atol=1e-6) - - if is_full_test(): - t = '(Tensor, Tensor, NoneType, NoneType) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, edge_index), out) - - t = '(Tensor, SparseTensor, NoneType, NoneType) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x1, adj1.t()), out, atol=1e-6) - - # Test `return_attention_weights`. - result = conv(x1, edge_index, return_attention_weights=True) - assert torch.allclose(result[0], out) - assert result[1][0].size() == (2, 4) - assert result[1][1].size() == (4, 2) - assert result[1][1].min() >= 0 and result[1][1].max() <= 1 - assert conv._alpha is None - - result = conv(x1, adj1.t(), return_attention_weights=True) - assert torch.allclose(result[0], out, atol=1e-6) - assert result[1].sizes() == [4, 4, 2] and result[1].nnz() == 4 - assert conv._alpha is None - - if is_full_test(): - t = ('(Tensor, Tensor, NoneType, bool) -> ' - 'Tuple[Tensor, Tuple[Tensor, Tensor]]') - jit = torch.jit.script(conv.jittable(t)) - result = jit(x1, edge_index, return_attention_weights=True) - assert torch.allclose(result[0], out) - assert result[1][0].size() == (2, 4) - assert result[1][1].size() == (4, 2) - assert result[1][1].min() >= 0 and result[1][1].max() <= 1 - assert conv._alpha is None - - t = ('(Tensor, SparseTensor, NoneType, bool) -> ' - 'Tuple[Tensor, SparseTensor]') - jit = torch.jit.script(conv.jittable(t)) - result = jit(x1, adj1.t(), return_attention_weights=True) - assert torch.allclose(result[0], out, atol=1e-6) - assert result[1].sizes() == [4, 4, 2] and result[1].nnz() == 4 - assert conv._alpha is None - - adj1 = adj1.sparse_resize((4, 2)) - adj2 = adj1.to_torch_sparse_csc_tensor() - conv = TransformerConv((8, 16), 32, heads=2, beta=True) - assert str(conv) == 'TransformerConv((8, 16), 32, heads=2)' - - out = conv((x1, x2), edge_index) - assert out.size() == (2, 64) - assert torch.allclose(conv((x1, x2), adj1.t()), out, atol=1e-6) - assert torch.allclose(conv((x1, x2), adj2.t()), out, atol=1e-6) - - if is_full_test(): - t = '(PairTensor, Tensor, NoneType, NoneType) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), edge_index), out) - - t = '(PairTensor, SparseTensor, NoneType, NoneType) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit((x1, x2), adj1.t()), out, atol=1e-6) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_wl_conv.py b/pytorch_geometric-2.3.1/test/nn/conv/test_wl_conv.py deleted file mode 100644 index 889e578..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_wl_conv.py +++ /dev/null @@ -1,28 +0,0 @@ -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import WLConv -from torch_geometric.utils import one_hot - - -def test_wl_conv(): - x1 = torch.tensor([1, 0, 0, 1]) - x2 = one_hot(x1) - edge_index = torch.tensor([[0, 1, 1, 2, 2, 3], [1, 0, 2, 1, 3, 2]]) - adj1 = SparseTensor.from_edge_index(edge_index) - adj2 = adj1.to_torch_sparse_csc_tensor() - - conv = WLConv() - assert str(conv) == 'WLConv()' - - out = conv(x1, edge_index) - assert out.tolist() == [0, 1, 1, 0] - assert torch.equal(conv(x2, edge_index), out) - assert torch.equal(conv(x1, adj1.t()), out) - 
assert torch.equal(conv(x1, adj2.t()), out) - assert torch.equal(conv(x2, adj1.t()), out) - assert torch.equal(conv(x2, adj2.t()), out) - - assert conv.histogram(out).tolist() == [[2, 2]] - assert torch.allclose(conv.histogram(out, norm=True), - torch.tensor([[0.7071, 0.7071]])) diff --git a/pytorch_geometric-2.3.1/test/nn/conv/test_wl_conv_continuous.py b/pytorch_geometric-2.3.1/test/nn/conv/test_wl_conv_continuous.py deleted file mode 100644 index 4080da8..0000000 --- a/pytorch_geometric-2.3.1/test/nn/conv/test_wl_conv_continuous.py +++ /dev/null @@ -1,31 +0,0 @@ -import torch - -from torch_geometric.nn import WLConvContinuous -from torch_geometric.testing import is_full_test - - -def test_wl_conv(): - edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]], dtype=torch.long) - x = torch.tensor([[-1], [0], [1]], dtype=torch.float) - - conv = WLConvContinuous() - assert str(conv) == 'WLConvContinuous()' - - out = conv(x, edge_index) - assert out.tolist() == [[-0.5], [0.0], [0.5]] - - if is_full_test(): - t = '(Tensor, Tensor, OptTensor, Size) -> Tensor' - jit = torch.jit.script(conv.jittable(t)) - assert torch.allclose(jit(x, edge_index), out) - - x1 = torch.randn(4, 8) - x2 = torch.randn(2, 8) - edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]]) - edge_weight = torch.randn(edge_index.size(1)) - - out = conv((x1, None), edge_index, edge_weight, size=(4, 2)) - assert out.size() == (2, 8) - - out = conv((x1, x2), edge_index, edge_weight) - assert out.size() == (2, 8) diff --git a/pytorch_geometric-2.3.1/test/nn/dense/test_diff_pool.py b/pytorch_geometric-2.3.1/test/nn/dense/test_diff_pool.py deleted file mode 100644 index 051f5f4..0000000 --- a/pytorch_geometric-2.3.1/test/nn/dense/test_diff_pool.py +++ /dev/null @@ -1,26 +0,0 @@ -import torch - -from torch_geometric.nn import dense_diff_pool -from torch_geometric.testing import is_full_test - - -def test_dense_diff_pool(): - batch_size, num_nodes, channels, num_clusters = (2, 20, 16, 10) - x = torch.randn((batch_size, num_nodes, channels)) - adj = torch.rand((batch_size, num_nodes, num_nodes)) - s = torch.randn((batch_size, num_nodes, num_clusters)) - mask = torch.randint(0, 2, (batch_size, num_nodes), dtype=torch.bool) - - x_out, adj_out, link_loss, ent_loss = dense_diff_pool(x, adj, s, mask) - assert x_out.size() == (2, 10, 16) - assert adj_out.size() == (2, 10, 10) - assert link_loss.item() >= 0 - assert ent_loss.item() >= 0 - - if is_full_test(): - jit = torch.jit.script(dense_diff_pool) - x_jit, adj_jit, link_loss, ent_loss = jit(x, adj, s, mask) - assert torch.allclose(x_jit, x_out) - assert torch.allclose(adj_jit, adj_out) - assert link_loss.item() >= 0 - assert ent_loss.item() >= 0 diff --git a/pytorch_geometric-2.3.1/test/nn/dense/test_linear.py b/pytorch_geometric-2.3.1/test/nn/dense/test_linear.py deleted file mode 100644 index e3b04cd..0000000 --- a/pytorch_geometric-2.3.1/test/nn/dense/test_linear.py +++ /dev/null @@ -1,178 +0,0 @@ -import copy - -import pytest -import torch -from torch.nn import Linear as PTLinear -from torch.nn.parameter import UninitializedParameter - -from torch_geometric.nn import HeteroDictLinear, HeteroLinear, Linear -from torch_geometric.testing import is_full_test, withPackage - -weight_inits = ['glorot', 'kaiming_uniform', None] -bias_inits = ['zeros', None] - - -@pytest.mark.parametrize('weight', weight_inits) -@pytest.mark.parametrize('bias', bias_inits) -def test_linear(weight, bias): - x = torch.randn(3, 4, 16) - lin = Linear(16, 32, weight_initializer=weight, bias_initializer=bias) - assert 
str(lin) == 'Linear(16, 32, bias=True)'
-    assert lin(x).size() == (3, 4, 32)
-
-
-@pytest.mark.parametrize('weight', weight_inits)
-@pytest.mark.parametrize('bias', bias_inits)
-def test_lazy_linear(weight, bias):
-    x = torch.randn(3, 4, 16)
-    lin = Linear(-1, 32, weight_initializer=weight, bias_initializer=bias)
-    assert str(lin) == 'Linear(-1, 32, bias=True)'
-    assert lin(x).size() == (3, 4, 32)
-    assert str(lin) == 'Linear(16, 32, bias=True)'
-
-
-@pytest.mark.parametrize('dim1', [-1, 16])
-@pytest.mark.parametrize('dim2', [-1, 16])
-def test_load_lazy_linear(dim1, dim2):
-    lin1 = Linear(dim1, 32)
-    lin2 = Linear(dim1, 32)
-    lin2.load_state_dict(lin1.state_dict())
-
-    if dim1 != -1:
-        assert torch.allclose(lin1.weight, lin2.weight)
-        assert torch.allclose(lin1.bias, lin2.bias)
-        assert not hasattr(lin1, '_hook')
-        assert not hasattr(lin2, '_hook')
-    else:
-        assert isinstance(lin1.weight, UninitializedParameter)
-        assert isinstance(lin2.weight, UninitializedParameter)
-        assert hasattr(lin1, '_hook')
-        assert hasattr(lin2, '_hook')
-
-    with pytest.raises(RuntimeError, match="in state_dict"):
-        lin1.load_state_dict({}, strict=True)
-    lin1.load_state_dict({}, strict=False)
-
-
-@pytest.mark.parametrize('lazy', [True, False])
-def test_identical_linear_default_initialization(lazy):
-    x = torch.randn(3, 4, 16)
-
-    torch.manual_seed(12345)
-    lin1 = Linear(-1 if lazy else 16, 32)
-    lin1(x)
-
-    torch.manual_seed(12345)
-    lin2 = PTLinear(16, 32)
-
-    assert lin1.weight.tolist() == lin2.weight.tolist()
-    assert lin1.bias.tolist() == lin2.bias.tolist()
-    assert lin1(x).tolist() == lin2(x).tolist()
-
-
-@withPackage('torch<=1.12')
-def test_copy_uninitialized_parameter():
-    weight = UninitializedParameter()
-    with pytest.raises(Exception):
-        copy.deepcopy(weight)
-
-
-@pytest.mark.parametrize('lazy', [True, False])
-def test_copy_linear(lazy):
-    lin = Linear(-1 if lazy else 16, 32)
-
-    copied_lin = copy.copy(lin)
-    assert id(copied_lin) != id(lin)
-    assert id(copied_lin.weight) == id(lin.weight)
-    if not isinstance(copied_lin.weight, UninitializedParameter):
-        assert copied_lin.weight.data_ptr() == lin.weight.data_ptr()
-    assert id(copied_lin.bias) == id(lin.bias)
-    assert copied_lin.bias.data_ptr() == lin.bias.data_ptr()
-
-    copied_lin = copy.deepcopy(lin)
-    assert id(copied_lin) != id(lin)
-    assert id(copied_lin.weight) != id(lin.weight)
-    if not isinstance(copied_lin.weight, UninitializedParameter):
-        assert copied_lin.weight.data_ptr() != lin.weight.data_ptr()
-        assert torch.allclose(copied_lin.weight, lin.weight)
-    assert id(copied_lin.bias) != id(lin.bias)
-    assert copied_lin.bias.data_ptr() != lin.bias.data_ptr()
-    if int(torch.isnan(lin.bias).sum()) == 0:
-        assert torch.allclose(copied_lin.bias, lin.bias)
-
-
-def test_hetero_linear():
-    x = torch.randn(3, 16)
-    type_vec = torch.tensor([0, 1, 2])
-
-    lin = HeteroLinear(16, 32, num_types=3)
-    assert str(lin) == 'HeteroLinear(16, 32, num_types=3, bias=True)'
-
-    out = lin(x, type_vec)
-    assert out.size() == (3, 32)
-
-    if is_full_test():
-        jit = torch.jit.script(lin)
-        assert torch.allclose(jit(x, type_vec), out)
-
-
-def test_lazy_hetero_linear():
-    x = torch.randn(3, 16)
-    type_vec = torch.tensor([0, 1, 2])
-
-    lin = HeteroLinear(-1, 32, num_types=3)
-    assert str(lin) == 'HeteroLinear(-1, 32, num_types=3, bias=True)'
-
-    out = lin(x, type_vec)
-    assert out.size() == (3, 32)
-
-
-def test_hetero_dict_linear():
-    x_dict = {'v': torch.randn(3, 16), 'w': torch.randn(2, 8)}
-
-    lin = HeteroDictLinear({'v': 16, 'w': 8}, 32)
-    assert str(lin) == 
"HeteroDictLinear({'v': 16, 'w': 8}, 32, bias=True)" - - out_dict = lin(x_dict) - assert len(out_dict) == 2 - assert out_dict['v'].size() == (3, 32) - assert out_dict['w'].size() == (2, 32) - - x_dict = {'v': torch.randn(3, 16), 'w': torch.randn(2, 16)} - - lin = HeteroDictLinear(16, 32, types=['v', 'w']) - assert str(lin) == "HeteroDictLinear({'v': 16, 'w': 16}, 32, bias=True)" - - out_dict = lin(x_dict) - assert len(out_dict) == 2 - assert out_dict['v'].size() == (3, 32) - assert out_dict['w'].size() == (2, 32) - - -def test_lazy_hetero_dict_linear(): - x_dict = {'v': torch.randn(3, 16), 'w': torch.randn(2, 8)} - - lin = HeteroDictLinear(-1, 32, types=['v', 'w']) - assert str(lin) == "HeteroDictLinear({'v': -1, 'w': -1}, 32, bias=True)" - - out_dict = lin(x_dict) - assert len(out_dict) == 2 - assert out_dict['v'].size() == (3, 32) - assert out_dict['w'].size() == (2, 32) - - -@withPackage('pyg_lib') -@pytest.mark.parametrize('type_vec', [ - torch.tensor([0, 0, 1, 1, 2, 2]), - torch.tensor([0, 1, 2, 0, 1, 2]), -]) -def test_hetero_linear_sort(type_vec): - x = torch.randn(type_vec.numel(), 16) - - lin = HeteroLinear(16, 32, num_types=3) - out = lin(x, type_vec) - - for i in range(type_vec.numel()): - node_type = int(type_vec[i]) - expected = x[i] @ lin.weight[node_type] + lin.bias[node_type] - assert torch.allclose(out[i], expected, atol=1e-6) diff --git a/pytorch_geometric-2.3.1/test/nn/models/test_correct_and_smooth.py b/pytorch_geometric-2.3.1/test/nn/models/test_correct_and_smooth.py deleted file mode 100644 index 2f4d630..0000000 --- a/pytorch_geometric-2.3.1/test/nn/models/test_correct_and_smooth.py +++ /dev/null @@ -1,44 +0,0 @@ -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn.models import CorrectAndSmooth - - -def test_correct_and_smooth(): - y_soft = torch.tensor([0.1, 0.5, 0.4]).repeat(6, 1) - y_true = torch.tensor([1, 0, 0, 2, 1, 1]) - edge_index = torch.tensor([[0, 1, 1, 2, 4, 5], [1, 0, 2, 1, 5, 4]]) - adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(6, 6)).t() - mask = torch.randint(0, 2, (6, ), dtype=torch.bool) - - model = CorrectAndSmooth( - num_correction_layers=2, - correction_alpha=0.5, - num_smoothing_layers=2, - smoothing_alpha=0.5, - ) - assert str(model) == ('CorrectAndSmooth(\n' - ' correct: num_layers=2, alpha=0.5\n' - ' smooth: num_layers=2, alpha=0.5\n' - ' autoscale=True, scale=1.0\n' - ')') - - out = model.correct(y_soft, y_true[mask], mask, edge_index) - assert out.size() == (6, 3) - assert torch.allclose(out, model.correct(y_soft, y_true[mask], mask, adj)) - - out = model.smooth(y_soft, y_true[mask], mask, edge_index) - assert out.size() == (6, 3) - assert torch.allclose(out, model.smooth(y_soft, y_true[mask], mask, adj)) - - # Test without autoscale: - model = CorrectAndSmooth( - num_correction_layers=2, - correction_alpha=0.5, - num_smoothing_layers=2, - smoothing_alpha=0.5, - autoscale=False, - ) - out = model.correct(y_soft, y_true[mask], mask, edge_index) - assert out.size() == (6, 3) - assert torch.allclose(out, model.correct(y_soft, y_true[mask], mask, adj)) diff --git a/pytorch_geometric-2.3.1/test/nn/models/test_deep_graph_infomax.py b/pytorch_geometric-2.3.1/test/nn/models/test_deep_graph_infomax.py deleted file mode 100644 index a3e5fb4..0000000 --- a/pytorch_geometric-2.3.1/test/nn/models/test_deep_graph_infomax.py +++ /dev/null @@ -1,34 +0,0 @@ -import torch - -from torch_geometric.nn import DeepGraphInfomax -from torch_geometric.testing import is_full_test - - -def test_deep_graph_infomax(): - def 
corruption(z):
-        return z + 1
-
-    model = DeepGraphInfomax(hidden_channels=16, encoder=lambda x: x,
-                             summary=lambda z, *args: z.mean(dim=0),
-                             corruption=corruption)
-
-    assert str(model) == 'DeepGraphInfomax(16)'
-
-    x = torch.ones(20, 16)
-
-    pos_z, neg_z, summary = model(x)
-    assert pos_z.size() == (20, 16) and neg_z.size() == (20, 16)
-    assert summary.size() == (16, )
-
-    if is_full_test():
-        jit = torch.jit.export(model)
-        pos_z, neg_z, summary = jit(x)
-        assert pos_z.size() == (20, 16) and neg_z.size() == (20, 16)
-        assert summary.size() == (16, )
-
-    loss = model.loss(pos_z, neg_z, summary)
-    assert 0 <= loss.item()
-
-    acc = model.test(torch.ones(20, 16), torch.randint(10, (20, )),
-                     torch.ones(20, 16), torch.randint(10, (20, )))
-    assert 0 <= acc and acc <= 1
diff --git a/pytorch_geometric-2.3.1/test/nn/models/test_graph_unet.py b/pytorch_geometric-2.3.1/test/nn/models/test_graph_unet.py
deleted file mode 100644
index 80987be..0000000
--- a/pytorch_geometric-2.3.1/test/nn/models/test_graph_unet.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import torch
-
-from torch_geometric.nn import GraphUNet
-from torch_geometric.testing import is_full_test
-
-
-def test_graph_unet():
-    model = GraphUNet(16, 32, 8, depth=3)
-    out = 'GraphUNet(16, 32, 8, depth=3, pool_ratios=[0.5, 0.5, 0.5])'
-    assert str(model) == out
-
-    x = torch.randn(3, 16)
-    edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]])
-
-    out = model(x, edge_index)
-    assert out.size() == (3, 8)
-
-    if is_full_test():
-        jit = torch.jit.export(model)
-        out = jit(x, edge_index)
-        assert out.size() == (3, 8)
diff --git a/pytorch_geometric-2.3.1/test/nn/models/test_label_prop.py b/pytorch_geometric-2.3.1/test/nn/models/test_label_prop.py
deleted file mode 100644
index bc7d993..0000000
--- a/pytorch_geometric-2.3.1/test/nn/models/test_label_prop.py
+++ /dev/null
@@ -1,28 +0,0 @@
-import torch
-from torch_sparse import SparseTensor
-
-from torch_geometric.nn.models import LabelPropagation
-
-
-def test_label_prop():
-    y = torch.tensor([1, 0, 0, 2, 1, 1])
-    edge_index = torch.tensor([[0, 1, 1, 2, 4, 5], [1, 0, 2, 1, 5, 4]])
-    adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(6, 6))
-    mask = torch.randint(0, 2, (6, ), dtype=torch.bool)
-
-    model = LabelPropagation(num_layers=2, alpha=0.5)
-    assert str(model) == 'LabelPropagation(num_layers=2, alpha=0.5)'
-
-    # Test without mask:
-    out = model(y, edge_index)
-    assert out.size() == (6, 3)
-    assert torch.allclose(model(y, adj.t()), out)
-
-    # Test with mask:
-    out = model(y, edge_index, mask)
-    assert out.size() == (6, 3)
-    assert torch.allclose(model(y, adj.t(), mask), out)
-
-    # Test post step:
-    out = model(y, edge_index, mask, post_step=lambda y: torch.zeros_like(y))
-    assert torch.sum(out) == 0.
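A pattern worth calling out across the deleted tests in this patch: every convolution is exercised twice, once with a dense `edge_index` and once with the transposed sparse adjacency `adj.t()`. The sketch below illustrates why the transpose is required; it is a minimal illustration of the convention, assuming `torch_sparse` is installed and using `GCNConv` as a stand-in layer (the layer choice is ours, not part of this patch):

```python
import torch
from torch_sparse import SparseTensor

from torch_geometric.nn import GCNConv

x = torch.randn(4, 16)
# `edge_index` lists edges as (source, target) pairs.
edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])

# SparseTensor(row=source, col=target) is laid out source-to-target, but
# message passing aggregates over *incoming* edges, so the sparse overload
# expects the transposed (target, source) layout -- hence `adj.t()`.
adj = SparseTensor(row=edge_index[0], col=edge_index[1], sparse_sizes=(4, 4))

conv = GCNConv(16, 32)
out = conv(x, edge_index)

# Both call paths should agree up to floating-point reduction order,
# which is why the tests compare with `atol=1e-6` rather than equality.
assert torch.allclose(conv(x, adj.t()), out, atol=1e-6)
```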
diff --git a/pytorch_geometric-2.3.1/test/nn/models/test_lightgcn.py b/pytorch_geometric-2.3.1/test/nn/models/test_lightgcn.py deleted file mode 100644 index 4176b00..0000000 --- a/pytorch_geometric-2.3.1/test/nn/models/test_lightgcn.py +++ /dev/null @@ -1,59 +0,0 @@ -import pytest -import torch - -from torch_geometric.nn.models import LightGCN - - -@pytest.mark.parametrize('embedding_dim', [32, 64]) -@pytest.mark.parametrize('lambda_reg', [0, 1e-4]) -@pytest.mark.parametrize('alpha', [0, .25, torch.tensor([0.4, 0.3, 0.2])]) -def test_lightgcn_ranking(embedding_dim, lambda_reg, alpha): - N = 500 - edge_index = torch.randint(0, N, (2, 400), dtype=torch.int64) - edge_label_index = torch.randint(0, N, (2, 100), dtype=torch.int64) - - model = LightGCN(N, embedding_dim, num_layers=2, alpha=alpha) - assert str(model) == f'LightGCN(500, {embedding_dim}, num_layers=2)' - - pred = model(edge_index, edge_label_index) - assert pred.size() == (100, ) - - loss = model.recommendation_loss(pred[:50], pred[50:], lambda_reg) - assert loss.dim() == 0 and loss > 0 - - out = model.recommend(edge_index, k=2) - assert out.size() == (500, 2) - assert out.min() >= 0 and out.max() < 500 - - src_index = torch.arange(0, 250) - dst_index = torch.arange(250, 500) - - out = model.recommend(edge_index, src_index, dst_index, k=2) - assert out.size() == (250, 2) - assert out.min() >= 250 and out.max() < 500 - - -@pytest.mark.parametrize('embedding_dim', [32, 64]) -@pytest.mark.parametrize('alpha', [0, .25, torch.tensor([0.4, 0.3, 0.2])]) -def test_lightgcn_link_prediction(embedding_dim, alpha): - N = 500 - edge_index = torch.randint(0, N, (2, 400), dtype=torch.int64) - edge_label_index = torch.randint(0, N, (2, 100), dtype=torch.int64) - edge_label = torch.randint(0, 2, (edge_label_index.size(1), )) - - model = LightGCN(N, embedding_dim, num_layers=2, alpha=alpha) - assert str(model) == f'LightGCN(500, {embedding_dim}, num_layers=2)' - - pred = model(edge_index, edge_label_index) - assert pred.size() == (100, ) - - loss = model.link_pred_loss(pred, edge_label) - assert loss.dim() == 0 and loss > 0 - - prob = model.predict_link(edge_index, edge_label_index, prob=True) - assert prob.size() == (100, ) - assert prob.min() > 0 and prob.max() < 1 - - prob = model.predict_link(edge_index, edge_label_index, prob=False) - assert prob.size() == (100, ) - assert ((prob == 0) | (prob == 1)).sum() == 100 diff --git a/pytorch_geometric-2.3.1/test/nn/models/test_linkx.py b/pytorch_geometric-2.3.1/test/nn/models/test_linkx.py deleted file mode 100644 index 98e2578..0000000 --- a/pytorch_geometric-2.3.1/test/nn/models/test_linkx.py +++ /dev/null @@ -1,46 +0,0 @@ -import pytest -import torch -from torch_sparse import SparseTensor - -from torch_geometric.nn import LINKX -from torch_geometric.testing import is_full_test - - -@pytest.mark.parametrize('num_edge_layers', [1, 2]) -def test_linkx(num_edge_layers): - x = torch.randn(4, 16) - edge_index = torch.tensor([[0, 1, 2], [1, 2, 3]]) - edge_weight = torch.rand(edge_index.size(1)) - adj2 = SparseTensor.from_edge_index(edge_index, edge_weight, - sparse_sizes=(4, 4)) - adj1 = adj2.set_value(None) - - model = LINKX(num_nodes=4, in_channels=16, hidden_channels=32, - out_channels=8, num_layers=2, - num_edge_layers=num_edge_layers) - assert str(model) == 'LINKX(num_nodes=4, in_channels=16, out_channels=8)' - - out = model(x, edge_index) - assert out.size() == (4, 8) - assert torch.allclose(out, model(x, adj1.t()), atol=1e-4) - - if is_full_test(): - t = '(OptTensor, Tensor, OptTensor) -> Tensor' - 
jit = torch.jit.script(model.jittable(t)) - assert torch.allclose(jit(x, edge_index), out) - - t = '(OptTensor, SparseTensor, OptTensor) -> Tensor' - jit = torch.jit.script(model.jittable(t)) - assert torch.allclose(jit(x, adj1.t()), out) - - out = model(None, edge_index) - assert out.size() == (4, 8) - assert torch.allclose(out, model(None, adj1.t()), atol=1e-4) - - out = model(x, edge_index, edge_weight) - assert out.size() == (4, 8) - assert torch.allclose(out, model(x, adj2.t()), atol=1e-4) - - out = model(None, edge_index, edge_weight) - assert out.size() == (4, 8) - assert torch.allclose(out, model(None, adj2.t()), atol=1e-4) diff --git a/pytorch_geometric-2.3.1/test/nn/models/test_metapath2vec.py b/pytorch_geometric-2.3.1/test/nn/models/test_metapath2vec.py deleted file mode 100644 index 990f0f9..0000000 --- a/pytorch_geometric-2.3.1/test/nn/models/test_metapath2vec.py +++ /dev/null @@ -1,39 +0,0 @@ -import torch - -from torch_geometric.nn import MetaPath2Vec - - -def test_metapath2vec(): - edge_index_dict = { - ('author', 'writes', 'paper'): - torch.tensor([[0, 1, 1, 2], [0, 0, 1, 1]]), - ('paper', 'written_by', 'author'): - torch.tensor([[0, 0, 1, 1], [0, 1, 1, 2]]) - } - - metapath = [ - ('author', 'writes', 'paper'), - ('paper', 'written_by', 'author'), - ] - - model = MetaPath2Vec(edge_index_dict, embedding_dim=16, metapath=metapath, - walk_length=2, context_size=2) - assert str(model) == 'MetaPath2Vec(5, 16)' - - z = model('author') - assert z.size() == (3, 16) - - z = model('paper') - assert z.size() == (2, 16) - - z = model('author', torch.arange(2)) - assert z.size() == (2, 16) - - pos_rw, neg_rw = model._sample(torch.arange(3)) - - loss = model.loss(pos_rw, neg_rw) - assert 0 <= loss.item() - - acc = model.test(torch.ones(20, 16), torch.randint(10, (20, )), - torch.ones(20, 16), torch.randint(10, (20, ))) - assert 0 <= acc and acc <= 1 diff --git a/pytorch_geometric-2.3.1/test/nn/models/test_mlp.py b/pytorch_geometric-2.3.1/test/nn/models/test_mlp.py deleted file mode 100644 index cb28a89..0000000 --- a/pytorch_geometric-2.3.1/test/nn/models/test_mlp.py +++ /dev/null @@ -1,49 +0,0 @@ -import pytest -import torch - -from torch_geometric.nn import MLP -from torch_geometric.testing import is_full_test - - -@pytest.mark.parametrize('norm', ['batch_norm', None]) -@pytest.mark.parametrize('act_first', [False, True]) -@pytest.mark.parametrize('plain_last', [False, True]) -def test_mlp(norm, act_first, plain_last): - x = torch.randn(4, 16) - - torch.manual_seed(12345) - mlp = MLP( - [16, 32, 32, 64], - norm=norm, - act_first=act_first, - plain_last=plain_last, - ) - assert str(mlp) == 'MLP(16, 32, 32, 64)' - out = mlp(x) - assert out.size() == (4, 64) - - if is_full_test(): - jit = torch.jit.script(mlp) - assert torch.allclose(jit(x), out) - - torch.manual_seed(12345) - mlp = MLP( - 16, - hidden_channels=32, - out_channels=64, - num_layers=3, - norm=norm, - act_first=act_first, - plain_last=plain_last, - ) - assert torch.allclose(mlp(x), out) - - -@pytest.mark.parametrize('plain_last', [False, True]) -def test_fine_grained_mlp(plain_last): - mlp = MLP( - [16, 32, 32, 64], - dropout=[0.1, 0.2, 0.3], - bias=[False, True, False], - ) - assert mlp(torch.randn(4, 16)).size() == (4, 64) diff --git a/pytorch_geometric-2.3.1/test/nn/models/test_node2vec.py b/pytorch_geometric-2.3.1/test/nn/models/test_node2vec.py deleted file mode 100644 index 58ab8a2..0000000 --- a/pytorch_geometric-2.3.1/test/nn/models/test_node2vec.py +++ /dev/null @@ -1,30 +0,0 @@ -import torch - -from 
torch_geometric.nn import Node2Vec
-from torch_geometric.testing import is_full_test, withPackage
-
-
-@withPackage('torch_cluster')
-def test_node2vec():
-    edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]])
-
-    model = Node2Vec(edge_index, embedding_dim=16, walk_length=2,
-                     context_size=2)
-    assert str(model) == 'Node2Vec(3, 16)'
-
-    assert model(torch.arange(3)).size() == (3, 16)
-
-    pos_rw, neg_rw = model.sample(torch.arange(3))
-    assert float(model.loss(pos_rw, neg_rw)) >= 0
-
-    acc = model.test(torch.ones(20, 16), torch.randint(10, (20, )),
-                     torch.ones(20, 16), torch.randint(10, (20, )))
-    assert 0 <= acc and acc <= 1
-
-    if is_full_test():
-        jit = torch.jit.script(model)
-
-        assert jit(torch.arange(3)).size() == (3, 16)
-
-        pos_rw, neg_rw = jit.sample(torch.arange(3))
-        assert float(jit.loss(pos_rw, neg_rw)) >= 0
diff --git a/pytorch_geometric-2.3.1/test/nn/models/test_rect.py b/pytorch_geometric-2.3.1/test/nn/models/test_rect.py
deleted file mode 100644
index 09ef7ec..0000000
--- a/pytorch_geometric-2.3.1/test/nn/models/test_rect.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import torch
-from torch_sparse import SparseTensor
-
-from torch_geometric.nn import RECT_L
-from torch_geometric.testing import is_full_test
-
-
-def test_rect():
-    x = torch.randn(6, 8)
-    y = torch.tensor([1, 0, 0, 2, 1, 1])
-    edge_index = torch.tensor([[0, 1, 1, 2, 4, 5], [1, 0, 2, 1, 5, 4]])
-    adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(6, 6))
-    mask = torch.randint(0, 2, (6, ), dtype=torch.bool)
-
-    model = RECT_L(8, 16)
-    assert str(model) == 'RECT_L(8, 16)'
-
-    out = model(x, edge_index)
-    assert out.size() == (6, 8)
-    assert torch.allclose(out, model(x, adj.t()))
-
-    # Test `embed`:
-    embed_out = model.embed(x, edge_index)
-    assert embed_out.size() == (6, 16)
-    assert torch.allclose(embed_out, model.embed(x, adj.t()))
-
-    # Test `get_semantic_labels`:
-    labels_out = model.get_semantic_labels(x, y, mask)
-    assert labels_out.size() == (int(mask.sum()), 8)
-
-    if is_full_test():
-        t = '(Tensor, Tensor, OptTensor) -> Tensor'
-        jit = torch.jit.script(model.jittable(t))
-        assert torch.allclose(jit(x, edge_index), out)
-        assert torch.allclose(embed_out, jit.embed(x, edge_index))
-        assert torch.allclose(labels_out, jit.get_semantic_labels(x, y, mask))
-
-        t = '(Tensor, SparseTensor, OptTensor) -> Tensor'
-        jit = torch.jit.script(model.jittable(t))
-        assert torch.allclose(jit(x, adj.t()), out)
-        assert torch.allclose(embed_out, jit.embed(x, adj.t()))
-        assert torch.allclose(labels_out, jit.get_semantic_labels(x, y, mask))
diff --git a/pytorch_geometric-2.3.1/test/nn/norm/test_layer_norm.py b/pytorch_geometric-2.3.1/test/nn/norm/test_layer_norm.py
deleted file mode 100644
index 3c17c16..0000000
--- a/pytorch_geometric-2.3.1/test/nn/norm/test_layer_norm.py
+++ /dev/null
@@ -1,56 +0,0 @@
-import pytest
-import torch
-
-from torch_geometric.nn import HeteroLayerNorm, LayerNorm
-from torch_geometric.testing import is_full_test, withCUDA
-
-
-@withCUDA
-@pytest.mark.parametrize('affine', [True, False])
-@pytest.mark.parametrize('mode', ['graph', 'node'])
-def test_layer_norm(device, affine, mode):
-    x = torch.randn(100, 16, device=device)
-    batch = torch.zeros(100, dtype=torch.long, device=device)
-
-    norm = LayerNorm(16, affine=affine, mode=mode).to(device)
-    assert str(norm) == f'LayerNorm(16, affine={affine}, mode={mode})'
-
-    if is_full_test():
-        torch.jit.script(norm)
-
-    out1 = norm(x)
-    assert out1.size() == (100, 16)
-    assert torch.allclose(norm(x, batch), out1, atol=1e-6)
-
-    out2 = 
norm(torch.cat([x, x], dim=0), torch.cat([batch, batch + 1], dim=0)) - assert torch.allclose(out1, out2[:100], atol=1e-6) - assert torch.allclose(out1, out2[100:], atol=1e-6) - - -@withCUDA -@pytest.mark.parametrize('affine', [False, True]) -def test_hetero_layer_norm(device, affine): - x = torch.randn((100, 16), device=device) - - # Test single type: - norm = LayerNorm(16, affine=affine, mode='node').to(device) - expected = norm(x) - - type_vec = torch.zeros(100, dtype=torch.long, device=device) - norm = HeteroLayerNorm(16, num_types=1, affine=affine).to(device) - assert str(norm) == 'HeteroLayerNorm(16, num_types=1)' - - out = norm(x, type_vec) - assert out.size() == (100, 16) - assert torch.allclose(out, expected) - - # Test multiple types: - type_vec = torch.randint(5, (100, ), device=device) - norm = HeteroLayerNorm(16, num_types=5, affine=affine).to(device) - out = norm(x, type_vec) - assert out.size() == (100, 16) - - mean = out.mean(dim=-1) - std = out.std(unbiased=False, dim=-1) - assert torch.allclose(mean, torch.zeros_like(mean), atol=1e-5) - assert torch.allclose(std, torch.ones_like(std), atol=1e-5) diff --git a/pytorch_geometric-2.3.1/test/nn/pool/test_avg_pool.py b/pytorch_geometric-2.3.1/test/nn/pool/test_avg_pool.py deleted file mode 100644 index af6cbe5..0000000 --- a/pytorch_geometric-2.3.1/test/nn/pool/test_avg_pool.py +++ /dev/null @@ -1,70 +0,0 @@ -import torch - -from torch_geometric.data import Batch -from torch_geometric.nn import avg_pool, avg_pool_neighbor_x, avg_pool_x -from torch_geometric.testing import is_full_test - - -def test_avg_pool_x(): - cluster = torch.tensor([0, 1, 0, 1, 2, 2]) - x = torch.Tensor([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]]) - batch = torch.tensor([0, 0, 0, 0, 1, 1]) - - out = avg_pool_x(cluster, x, batch) - assert out[0].tolist() == [[3, 4], [5, 6], [10, 11]] - assert out[1].tolist() == [0, 0, 1] - - if is_full_test(): - jit = torch.jit.script(avg_pool_x) - out = jit(cluster, x, batch) - assert out[0].tolist() == [[3, 4], [5, 6], [10, 11]] - assert out[1].tolist() == [0, 0, 1] - - out, _ = avg_pool_x(cluster, x, batch, size=2) - assert out.tolist() == [[3, 4], [5, 6], [10, 11], [0, 0]] - - if is_full_test(): - jit = torch.jit.script(avg_pool_x) - out, _ = jit(cluster, x, batch, size=2) - assert out.tolist() == [[3, 4], [5, 6], [10, 11], [0, 0]] - - -def test_avg_pool(): - cluster = torch.tensor([0, 1, 0, 1, 2, 2]) - x = torch.Tensor([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]]) - pos = torch.Tensor([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]) - edge_index = torch.tensor([[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5], - [1, 2, 3, 0, 2, 3, 0, 1, 3, 0, 1, 2, 5, 4]]) - edge_attr = torch.Tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]) - batch = torch.tensor([0, 0, 0, 0, 1, 1]) - - data = Batch(x=x, pos=pos, edge_index=edge_index, edge_attr=edge_attr, - batch=batch) - - data = avg_pool(cluster, data, transform=lambda x: x) - - assert data.x.tolist() == [[3, 4], [5, 6], [10, 11]] - assert data.pos.tolist() == [[1, 1], [2, 2], [4.5, 4.5]] - assert data.edge_index.tolist() == [[0, 1], [1, 0]] - assert data.edge_attr.tolist() == [4, 4] - assert data.batch.tolist() == [0, 0, 1] - - -def test_avg_pool_neighbor_x(): - x = torch.Tensor([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]]) - edge_index = torch.tensor([[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5], - [1, 2, 3, 0, 2, 3, 0, 1, 3, 0, 1, 2, 5, 4]]) - batch = torch.tensor([0, 0, 0, 0, 1, 1]) - - data = Batch(x=x, edge_index=edge_index, batch=batch) - data = 
avg_pool_neighbor_x(data) - - assert data.x.tolist() == [ - [4, 5], - [4, 5], - [4, 5], - [4, 5], - [10, 11], - [10, 11], - ] - assert data.edge_index.tolist() == edge_index.tolist() diff --git a/pytorch_geometric-2.3.1/test/nn/pool/test_max_pool.py b/pytorch_geometric-2.3.1/test/nn/pool/test_max_pool.py deleted file mode 100644 index 506e6f0..0000000 --- a/pytorch_geometric-2.3.1/test/nn/pool/test_max_pool.py +++ /dev/null @@ -1,70 +0,0 @@ -import torch - -from torch_geometric.data import Batch -from torch_geometric.nn import max_pool, max_pool_neighbor_x, max_pool_x -from torch_geometric.testing import is_full_test - - -def test_max_pool_x(): - cluster = torch.tensor([0, 1, 0, 1, 2, 2]) - x = torch.Tensor([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]]) - batch = torch.tensor([0, 0, 0, 0, 1, 1]) - - out = max_pool_x(cluster, x, batch) - assert out[0].tolist() == [[5, 6], [7, 8], [11, 12]] - assert out[1].tolist() == [0, 0, 1] - - if is_full_test(): - jit = torch.jit.script(max_pool_x) - out = jit(cluster, x, batch) - assert out[0].tolist() == [[5, 6], [7, 8], [11, 12]] - assert out[1].tolist() == [0, 0, 1] - - out, _ = max_pool_x(cluster, x, batch, size=2) - assert out.tolist() == [[5, 6], [7, 8], [11, 12], [0, 0]] - - if is_full_test(): - jit = torch.jit.script(max_pool_x) - out, _ = jit(cluster, x, batch, size=2) - assert out.tolist() == [[5, 6], [7, 8], [11, 12], [0, 0]] - - -def test_max_pool(): - cluster = torch.tensor([0, 1, 0, 1, 2, 2]) - x = torch.Tensor([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]]) - pos = torch.Tensor([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]) - edge_index = torch.tensor([[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5], - [1, 2, 3, 0, 2, 3, 0, 1, 3, 0, 1, 2, 5, 4]]) - edge_attr = torch.Tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]) - batch = torch.tensor([0, 0, 0, 0, 1, 1]) - - data = Batch(x=x, pos=pos, edge_index=edge_index, edge_attr=edge_attr, - batch=batch) - - data = max_pool(cluster, data, transform=lambda x: x) - - assert data.x.tolist() == [[5, 6], [7, 8], [11, 12]] - assert data.pos.tolist() == [[1, 1], [2, 2], [4.5, 4.5]] - assert data.edge_index.tolist() == [[0, 1], [1, 0]] - assert data.edge_attr.tolist() == [4, 4] - assert data.batch.tolist() == [0, 0, 1] - - -def test_max_pool_neighbor_x(): - x = torch.Tensor([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]]) - edge_index = torch.tensor([[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5], - [1, 2, 3, 0, 2, 3, 0, 1, 3, 0, 1, 2, 5, 4]]) - batch = torch.tensor([0, 0, 0, 0, 1, 1]) - - data = Batch(x=x, edge_index=edge_index, batch=batch) - data = max_pool_neighbor_x(data) - - assert data.x.tolist() == [ - [7, 8], - [7, 8], - [7, 8], - [7, 8], - [11, 12], - [11, 12], - ] - assert data.edge_index.tolist() == edge_index.tolist() diff --git a/pytorch_geometric-2.3.1/test/nn/pool/test_pooling_base.py b/pytorch_geometric-2.3.1/test/nn/pool/test_pooling_base.py deleted file mode 100644 index fc93935..0000000 --- a/pytorch_geometric-2.3.1/test/nn/pool/test_pooling_base.py +++ /dev/null @@ -1,52 +0,0 @@ -import torch - -from torch_geometric.nn import MaxAggregation -from torch_geometric.nn.pool.base import Pooling, PoolingOutput -from torch_geometric.nn.pool.connect import Connect -from torch_geometric.nn.pool.select import Select -from torch_geometric.utils import scatter - - -class DummySelect(Select): - def forward(self, x, edge_index, edge_attr, batch): - # Pool into a single node for each graph. 
- if batch is None: - return edge_index.new_zeros(x.size(0), dtype=torch.long), 1 - return batch, int(batch.max()) + 1 - - -class DummyConnect(Connect): - def forward(self, x, edge_index, edge_attr, batch): - # Return empty graph connection: - if edge_attr is not None: - edge_attr = edge_attr.new_empty((0, ) + edge_attr.size()[1:]) - return edge_index.new_empty(2, 0), edge_attr - - -def test_pooling(): - pool = Pooling(DummySelect(), MaxAggregation(), DummyConnect()) - pool.reset_parameters() - assert str(pool) == ('Pooling(\n' - ' select=DummySelect(),\n' - ' reduce=MaxAggregation(),\n' - ' connect=DummyConnect(),\n' - ')') - - x = torch.randn(10, 8) - edge_index = torch.empty((2, 0), dtype=torch.long) - edge_attr = torch.empty(0, 4) - batch = torch.tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) - - out = pool(x, edge_index) - assert isinstance(out, PoolingOutput) - assert torch.allclose(out.x, x.max(dim=0, keepdim=True)[0]) - assert out.edge_index.size() == (2, 0) - assert out.edge_attr is None - assert out.batch is None - - out = pool(x, edge_index, edge_attr, batch) - assert isinstance(out, PoolingOutput) - assert torch.allclose(out.x, scatter(x, batch, reduce='max')) - assert out.edge_index.size() == (2, 0) - assert out.edge_attr.size() == (0, 4) - assert out.batch.tolist() == [0, 1] diff --git a/pytorch_geometric-2.3.1/test/nn/pool/test_topk_pool.py b/pytorch_geometric-2.3.1/test/nn/pool/test_topk_pool.py deleted file mode 100644 index 8201db5..0000000 --- a/pytorch_geometric-2.3.1/test/nn/pool/test_topk_pool.py +++ /dev/null @@ -1,84 +0,0 @@ -import torch - -from torch_geometric.nn.pool.topk_pool import TopKPooling, filter_adj, topk -from torch_geometric.testing import is_full_test - - -def test_topk(): - x = torch.Tensor([2, 4, 5, 6, 2, 9]) - batch = torch.tensor([0, 0, 1, 1, 1, 1]) - - perm1 = topk(x, 0.5, batch) - assert perm1.tolist() == [1, 5, 3] - assert x[perm1].tolist() == [4, 9, 6] - assert batch[perm1].tolist() == [0, 1, 1] - - perm2 = topk(x, 2, batch) - assert perm2.tolist() == [1, 0, 5, 3] - assert x[perm2].tolist() == [4, 2, 9, 6] - assert batch[perm2].tolist() == [0, 0, 1, 1] - - perm3 = topk(x, 3, batch) - assert perm3.tolist() == [1, 0, 5, 3, 2] - assert x[perm3].tolist() == [4, 2, 9, 6, 5] - assert batch[perm3].tolist() == [0, 0, 1, 1, 1] - - if is_full_test(): - jit = torch.jit.script(topk) - assert torch.equal(jit(x, 0.5, batch), perm1) - assert torch.equal(jit(x, 2, batch), perm2) - assert torch.equal(jit(x, 3, batch), perm3) - - -def test_filter_adj(): - edge_index = torch.tensor([[0, 0, 1, 1, 2, 2, 3, 3], - [1, 3, 0, 2, 1, 3, 0, 2]]) - edge_attr = torch.Tensor([1, 2, 3, 4, 5, 6, 7, 8]) - perm = torch.tensor([2, 3]) - - out = filter_adj(edge_index, edge_attr, perm) - assert out[0].tolist() == [[0, 1], [1, 0]] - assert out[1].tolist() == [6, 8] - - if is_full_test(): - jit = torch.jit.script(filter_adj) - - out = jit(edge_index, edge_attr, perm) - assert out[0].tolist() == [[0, 1], [1, 0]] - assert out[1].tolist() == [6, 8] - - -def test_topk_pooling(): - in_channels = 16 - edge_index = torch.tensor([[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3], - [1, 2, 3, 0, 2, 3, 0, 1, 3, 0, 1, 2]]) - num_nodes = edge_index.max().item() + 1 - x = torch.randn((num_nodes, in_channels)) - - pool1 = TopKPooling(in_channels, ratio=0.5) - assert str(pool1) == 'TopKPooling(16, ratio=0.5, multiplier=1.0)' - out1 = pool1(x, edge_index) - assert out1[0].size() == (num_nodes // 2, in_channels) - assert out1[1].size() == (2, 2) - - pool2 = TopKPooling(in_channels, ratio=None, min_score=0.1) - assert 
str(pool2) == 'TopKPooling(16, min_score=0.1, multiplier=1.0)' - out2 = pool2(x, edge_index) - assert out2[0].size(0) <= x.size(0) and out2[0].size(1) == (16) - assert out2[1].size(0) == 2 and out2[1].size(1) <= edge_index.size(1) - - pool3 = TopKPooling(in_channels, ratio=2) - assert str(pool3) == 'TopKPooling(16, ratio=2, multiplier=1.0)' - out3 = pool3(x, edge_index) - assert out3[0].size() == (2, in_channels) - assert out3[1].size() == (2, 2) - - if is_full_test(): - jit1 = torch.jit.script(pool1) - assert torch.allclose(jit1(x, edge_index)[0], out1[0]) - - jit2 = torch.jit.script(pool2) - assert torch.allclose(jit2(x, edge_index)[0], out2[0]) - - jit3 = torch.jit.script(pool3) - assert torch.allclose(jit3(x, edge_index)[0], out3[0]) diff --git a/pytorch_geometric-2.3.1/test/nn/test_compile_dynamic.py b/pytorch_geometric-2.3.1/test/nn/test_compile_dynamic.py deleted file mode 100644 index 378eb56..0000000 --- a/pytorch_geometric-2.3.1/test/nn/test_compile_dynamic.py +++ /dev/null @@ -1,51 +0,0 @@ -import random - -import pytest -import torch -from torch import Tensor - -import torch_geometric -from torch_geometric.testing import ( - disableExtensions, - get_random_edge_index, - onlyLinux, - withCUDA, - withPackage, -) -from torch_geometric.utils import scatter - - -class MySAGEConv(torch.nn.Module): - def __init__(self, in_channels: int, out_channels: int): - super().__init__() - self.lin_src = torch.nn.Linear(in_channels, out_channels) - self.lin_dst = torch.nn.Linear(in_channels, out_channels) - - def forward(self, x: Tensor, edge_index: Tensor) -> Tensor: - x_j = x[edge_index[0]] - out = scatter(x_j, edge_index[1], dim_size=x.size(0), reduce='mean') - return self.lin_src(out) + self.lin_dst(x) - - -@withCUDA -@onlyLinux -@disableExtensions -@withPackage('torch>=2.0.0') -def test_dynamic_torch_compile(device): - conv = MySAGEConv(64, 64).to(device) - conv = torch_geometric.compile(conv, dynamic=True) - - optimizer = torch.optim.Adam(conv.parameters(), lr=0.01) - - with pytest.raises(RuntimeError): - for _ in range(10): - N = random.randrange(100, 500) - E = random.randrange(200, 1000) - - x = torch.randn(N, 64, device=device) - edge_index = get_random_edge_index(N, N, E, device=device) - - optimizer.zero_grad() - expected = conv(x, edge_index) - expected.mean().backward() - optimizer.step() diff --git a/pytorch_geometric-2.3.1/test/nn/test_inits.py b/pytorch_geometric-2.3.1/test/nn/test_inits.py deleted file mode 100644 index 6d402f8..0000000 --- a/pytorch_geometric-2.3.1/test/nn/test_inits.py +++ /dev/null @@ -1,61 +0,0 @@ -import torch -from torch.nn import Linear as Lin -from torch.nn import ReLU -from torch.nn import Sequential as Seq - -from torch_geometric.nn.inits import ( - glorot, - glorot_orthogonal, - ones, - reset, - uniform, - zeros, -) - - -def test_inits(): - x = torch.empty(1, 4) - - uniform(size=4, value=x) - assert x.min() >= -0.5 - assert x.max() <= 0.5 - - glorot(x) - assert x.min() >= -2.0 - assert x.max() <= 2.0 - - glorot_orthogonal(x, scale=1.0) - assert x.min() >= -2.0 - assert x.max() <= 2.0 - - zeros(x) - assert x.tolist() == [[0, 0, 0, 0]] - - ones(x) - assert x.tolist() == [[1, 1, 1, 1]] - - nn = Lin(16, 16) - uniform(size=4, value=nn.weight) - assert min(nn.weight.tolist()[0]) >= -0.5 - assert max(nn.weight.tolist()[0]) <= 0.5 - - glorot(nn.weight) - assert min(nn.weight.tolist()[0]) >= -1.25 - assert max(nn.weight.tolist()[0]) <= 1.25 - - glorot_orthogonal(nn.weight, scale=1.0) - assert min(nn.weight.tolist()[0]) >= -1.25 - assert 
max(nn.weight.tolist()[0]) <= 1.25 - - -def test_reset(): - nn = Lin(16, 16) - w = nn.weight.clone() - reset(nn) - assert not nn.weight.tolist() == w.tolist() - - nn = Seq(Lin(16, 16), ReLU(), Lin(16, 16)) - w_1, w_2 = nn[0].weight.clone(), nn[2].weight.clone() - reset(nn) - assert not nn[0].weight.tolist() == w_1.tolist() - assert not nn[2].weight.tolist() == w_2.tolist() diff --git a/pytorch_geometric-2.3.1/test/nn/test_model_summary.py b/pytorch_geometric-2.3.1/test/nn/test_model_summary.py deleted file mode 100644 index c357697..0000000 --- a/pytorch_geometric-2.3.1/test/nn/test_model_summary.py +++ /dev/null @@ -1,199 +0,0 @@ -import pytest -import torch -from torch import Tensor, nn -from torch_sparse import SparseTensor - -from torch_geometric.nn import Linear, SAGEConv, summary, to_hetero -from torch_geometric.nn.models import GCN -from torch_geometric.testing import withPackage - - -class GraphSAGE(torch.nn.Module): - def __init__(self): - super().__init__() - self.lin1 = Linear(16, 16) - self.conv1 = SAGEConv(16, 32) - self.lin2 = Linear(32, 32) - - def forward(self, x: Tensor, edge_index: Tensor) -> Tensor: - x = self.lin1(x).relu() - x = self.conv1(x, edge_index).relu() - x = self.lin2(x) - return x - - -class ModuleDictModel(nn.Module): - def __init__(self): - super().__init__() - self.acts = nn.ModuleDict({ - "lrelu": nn.LeakyReLU(), - "prelu": nn.PReLU() - }) - - def forward(self, x: torch.Tensor, act_type: str) -> torch.Tensor: - return self.acts[act_type](x) - - -@pytest.fixture -def gcn(): - torch.manual_seed(1) - model = GCN(32, 16, num_layers=2, out_channels=32) - x = torch.randn(100, 32) - edge_index = torch.randint(100, size=(2, 20)) - adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(100, 100)) - return dict(model=model, x=x, edge_index=edge_index, adj_t=adj.t()) - - -@withPackage('tabulate') -def test_summary_basic(gcn): - expected = """ -+---------------------+--------------------+----------------+----------+ -| Layer | Input Shape | Output Shape | #Param | -|---------------------+--------------------+----------------+----------| -| GCN | [100, 32], [2, 20] | [100, 32] | 1,072 | -| ├─(act)ReLU | [100, 16] | [100, 16] | -- | -| ├─(convs)ModuleList | -- | -- | 1,072 | -| │ └─(0)GCNConv | [100, 32], [2, 20] | [100, 16] | 528 | -| │ └─(1)GCNConv | [100, 16], [2, 20] | [100, 32] | 544 | -+---------------------+--------------------+----------------+----------+ -""" - assert summary(gcn['model'], gcn['x'], gcn['edge_index']) == expected[1:-1] - - -@withPackage('tabulate') -def test_summary_with_sparse_tensor(gcn): - expected = """ -+---------------------+-----------------------+----------------+----------+ -| Layer | Input Shape | Output Shape | #Param | -|---------------------+-----------------------+----------------+----------| -| GCN | [100, 32], [100, 100] | [100, 32] | 1,072 | -| ├─(act)ReLU | [100, 16] | [100, 16] | -- | -| ├─(convs)ModuleList | -- | -- | 1,072 | -| │ └─(0)GCNConv | [100, 32], [100, 100] | [100, 16] | 528 | -| │ └─(1)GCNConv | [100, 16], [100, 100] | [100, 32] | 544 | -+---------------------+-----------------------+----------------+----------+ -""" - assert summary(gcn['model'], gcn['x'], gcn['adj_t']) == expected[1:-1] - - -@withPackage('tabulate') -def test_summary_with_max_depth(gcn): - expected = """ -+---------------------+--------------------+----------------+----------+ -| Layer | Input Shape | Output Shape | #Param | -|---------------------+--------------------+----------------+----------| -| GCN | [100, 32], [2, 20] | [100, 32] | 
1,072 | -| ├─(act)ReLU | [100, 16] | [100, 16] | -- | -| ├─(convs)ModuleList | -- | -- | 1,072 | -+---------------------+--------------------+----------------+----------+ -""" - assert summary(gcn['model'], gcn['x'], gcn['edge_index'], - max_depth=1) == expected[1:-1] - - -@withPackage('tabulate') -def test_summary_with_leaf_module(gcn): - expected = """# noqa: E501 -+-----------------------------------------+--------------------+----------------+----------+ -| Layer | Input Shape | Output Shape | #Param | -|-----------------------------------------+--------------------+----------------+----------| -| GCN | [100, 32], [2, 20] | [100, 32] | 1,072 | -| ├─(act)ReLU | [100, 16] | [100, 16] | -- | -| ├─(convs)ModuleList | -- | -- | 1,072 | -| │ └─(0)GCNConv | [100, 32], [2, 20] | [100, 16] | 528 | -| │ │ └─(aggr_module)SumAggregation | [120, 16], [120] | [100, 16] | -- | -| │ │ └─(lin)Linear | [100, 32] | [100, 16] | 512 | -| │ └─(1)GCNConv | [100, 16], [2, 20] | [100, 32] | 544 | -| │ │ └─(aggr_module)SumAggregation | [120, 32], [120] | [100, 32] | -- | -| │ │ └─(lin)Linear | [100, 16] | [100, 32] | 512 | -+-----------------------------------------+--------------------+----------------+----------+ -""" - assert summary(gcn['model'], gcn['x'], gcn['edge_index'], - leaf_module=None) == expected[13:-1] - - -@withPackage('tabulate') -def test_summary_with_reusing_layers(): - act = nn.ReLU(inplace=True) - model1 = nn.Sequential(act, nn.Identity(), act, nn.Identity(), act) - model2 = nn.Sequential( - nn.ReLU(inplace=True), - nn.Identity(), - nn.ReLU(inplace=True), - nn.Identity(), - nn.ReLU(inplace=True), - ) - x = torch.randn(10) - - assert summary(model1, x) == summary(model2, x) - - -@withPackage('tabulate') -def test_summary_with_to_hetero_model(): - x_dict = { - 'p': torch.randn(100, 16), - 'a': torch.randn(100, 16), - } - edge_index_dict = { - ('p', 'to', 'p'): torch.randint(100, (2, 200)), - ('p', 'to', 'a'): torch.randint(100, (2, 200)), - ('a', 'to', 'p'): torch.randint(100, (2, 200)), - } - metadata = list(x_dict.keys()), list(edge_index_dict.keys()) - model = to_hetero(GraphSAGE(), metadata) - - expected = """ -+---------------------------+---------------------+----------------+----------+ -| Layer | Input Shape | Output Shape | #Param | -|---------------------------+---------------------+----------------+----------| -| GraphModule | | | 5,824 | -| ├─(lin1)ModuleDict | -- | -- | 544 | -| │ └─(p)Linear | [100, 16] | [100, 16] | 272 | -| │ └─(a)Linear | [100, 16] | [100, 16] | 272 | -| ├─(conv1)ModuleDict | -- | -- | 3,168 | -| │ └─(p__to__p)SAGEConv | [100, 16], [2, 200] | [100, 32] | 1,056 | -| │ └─(p__to__a)SAGEConv | [2, 200] | [100, 32] | 1,056 | -| │ └─(a__to__p)SAGEConv | [2, 200] | [100, 32] | 1,056 | -| ├─(lin2)ModuleDict | -- | -- | 2,112 | -| │ └─(p)Linear | [100, 32] | [100, 32] | 1,056 | -| │ └─(a)Linear | [100, 32] | [100, 32] | 1,056 | -+---------------------------+---------------------+----------------+----------+ -""" - assert summary(model, x_dict, edge_index_dict) == expected[1:-1] - - -@withPackage('tabulate') -def test_summary_with_module_dict_model(): - model = ModuleDictModel() - x = torch.randn(100, 32) - - expected = """ -+-------------------------+---------------+----------------+----------+ -| Layer | Input Shape | Output Shape | #Param | -|-------------------------+---------------+----------------+----------| -| ModuleDictModel | [100, 32] | [100, 32] | 1 | -| ├─(acts)ModuleDict | -- | -- | 1 | -| │ └─(lrelu)LeakyReLU | -- | -- | -- | -| │ └─(prelu)PReLU | [100, 
32] | [100, 32] | 1 | -+-------------------------+---------------+----------------+----------+ -""" - assert summary(model, x, 'prelu') == expected[1:-1] - - -@withPackage('tabulate') -def test_summary_with_jit_model(): - model = nn.Sequential(nn.Linear(32, 16), nn.ReLU(), nn.Linear(16, 8)) - model = torch.jit.script(model) - x = torch.randn(100, 32) - - expected = """ -+----------------------------+---------------+----------------+----------+ -| Layer | Input Shape | Output Shape | #Param | -|----------------------------+---------------+----------------+----------| -| RecursiveScriptModule | -- | -- | 664 | -| ├─(0)RecursiveScriptModule | -- | -- | 528 | -| ├─(1)RecursiveScriptModule | -- | -- | -- | -| ├─(2)RecursiveScriptModule | -- | -- | 136 | -+----------------------------+---------------+----------------+----------+ -""" - assert summary(model, x) == expected[1:-1] diff --git a/pytorch_geometric-2.3.1/test/nn/test_module_dict.py b/pytorch_geometric-2.3.1/test/nn/test_module_dict.py deleted file mode 100644 index 16fced0..0000000 --- a/pytorch_geometric-2.3.1/test/nn/test_module_dict.py +++ /dev/null @@ -1,30 +0,0 @@ -import torch - -from torch_geometric.nn.module_dict import ModuleDict - - -def test_internal_external_key_conversion(): - assert ModuleDict.to_internal_key('a.b') == 'a#b' - assert ModuleDict.to_internal_key('ab') == 'ab' - assert ModuleDict.to_internal_key('a.b.c') == 'a#b#c' - - assert ModuleDict.to_external_key('a#b') == 'a.b' - assert ModuleDict.to_external_key('a#b#c') == 'a.b.c' - - -def test_dot_syntax_keys(): - module_dict = ModuleDict({ - 'lin1': torch.nn.Linear(16, 16), - 'model.lin2': torch.nn.Linear(8, 8), - 'model.sub_model.lin3': torch.nn.Linear(4, 4), - }) - - expected_keys = {'lin1', 'model.lin2', 'model.sub_model.lin3'} - assert set(module_dict.keys()) == expected_keys - assert set([key for key, _ in module_dict.items()]) == expected_keys - - for key in expected_keys: - assert key in module_dict - - del module_dict['model.lin2'] - assert 'model.lin2' not in module_dict diff --git a/pytorch_geometric-2.3.1/test/nn/test_parameter_dict.py b/pytorch_geometric-2.3.1/test/nn/test_parameter_dict.py deleted file mode 100644 index 34cde8c..0000000 --- a/pytorch_geometric-2.3.1/test/nn/test_parameter_dict.py +++ /dev/null @@ -1,33 +0,0 @@ -from typing import Mapping - -import torch -from torch.nn import Parameter - -from torch_geometric.nn.parameter_dict import ParameterDict - - -def test_internal_external_key_conversion(): - assert ParameterDict.to_internal_key("a.b") == "a#b" - assert ParameterDict.to_internal_key("ab") == "ab" - assert ParameterDict.to_internal_key("a.b.c") == "a#b#c" - - assert ParameterDict.to_external_key("a#b") == "a.b" - assert ParameterDict.to_external_key("a#b#c") == "a.b.c" - - -def test_dot_syntax_keys(): - parameters: Mapping[str, Parameter] = { - "param1": Parameter(torch.Tensor(16, 16)), - "model.param2": Parameter(torch.Tensor(8, 8)), - "model.sub_model.param3": Parameter(torch.Tensor(4, 4)), - } - parameter_dict = ParameterDict(parameters) - - expected_keys = {"param1", "model.param2", "model.sub_model.param3"} - assert set(parameter_dict.keys()) == expected_keys - - for key in expected_keys: - assert key in parameter_dict - - del parameter_dict["model.param2"] - assert "model.param2" not in parameter_dict diff --git a/pytorch_geometric-2.3.1/test/nn/test_resolver.py b/pytorch_geometric-2.3.1/test/nn/test_resolver.py deleted file mode 100644 index 46ffd4e..0000000 --- a/pytorch_geometric-2.3.1/test/nn/test_resolver.py +++ 
/dev/null @@ -1,81 +0,0 @@ -import pytest -import torch -from torch.optim.lr_scheduler import ConstantLR, LambdaLR, ReduceLROnPlateau - -import torch_geometric -from torch_geometric.nn.resolver import ( - activation_resolver, - aggregation_resolver, - lr_scheduler_resolver, - normalization_resolver, -) - - -def test_activation_resolver(): - assert isinstance(activation_resolver(torch.nn.ELU()), torch.nn.ELU) - assert isinstance(activation_resolver(torch.nn.ReLU()), torch.nn.ReLU) - assert isinstance(activation_resolver(torch.nn.PReLU()), torch.nn.PReLU) - - assert isinstance(activation_resolver('elu'), torch.nn.ELU) - assert isinstance(activation_resolver('relu'), torch.nn.ReLU) - assert isinstance(activation_resolver('prelu'), torch.nn.PReLU) - - -@pytest.mark.parametrize('aggr_tuple', [ - (torch_geometric.nn.aggr.MeanAggregation, 'mean'), - (torch_geometric.nn.aggr.SumAggregation, 'sum'), - (torch_geometric.nn.aggr.SumAggregation, 'add'), - (torch_geometric.nn.aggr.MaxAggregation, 'max'), - (torch_geometric.nn.aggr.MinAggregation, 'min'), - (torch_geometric.nn.aggr.MulAggregation, 'mul'), - (torch_geometric.nn.aggr.VarAggregation, 'var'), - (torch_geometric.nn.aggr.StdAggregation, 'std'), - (torch_geometric.nn.aggr.SoftmaxAggregation, 'softmax'), - (torch_geometric.nn.aggr.PowerMeanAggregation, 'powermean'), -]) -def test_aggregation_resolver(aggr_tuple): - aggr_module, aggr_repr = aggr_tuple - assert isinstance(aggregation_resolver(aggr_module()), aggr_module) - assert isinstance(aggregation_resolver(aggr_repr), aggr_module) - - -@pytest.mark.parametrize('norm_tuple', [ - (torch_geometric.nn.norm.BatchNorm, 'batch', (16, )), - (torch_geometric.nn.norm.BatchNorm, 'batch_norm', (16, )), - (torch_geometric.nn.norm.InstanceNorm, 'instance_norm', (16, )), - (torch_geometric.nn.norm.LayerNorm, 'layer_norm', (16, )), - (torch_geometric.nn.norm.GraphNorm, 'graph_norm', (16, )), - (torch_geometric.nn.norm.GraphSizeNorm, 'graphsize_norm', ()), - (torch_geometric.nn.norm.PairNorm, 'pair_norm', ()), - (torch_geometric.nn.norm.MessageNorm, 'message_norm', ()), - (torch_geometric.nn.norm.DiffGroupNorm, 'diffgroup_norm', (16, 4)), -]) -def test_normalization_resolver(norm_tuple): - norm_module, norm_repr, norm_args = norm_tuple - assert isinstance(normalization_resolver(norm_module(*norm_args)), - norm_module) - assert isinstance(normalization_resolver(norm_repr, *norm_args), - norm_module) - - -@pytest.mark.parametrize('scheduler_args', [ - ('constant_with_warmup', LambdaLR), - ('linear_with_warmup', LambdaLR), - ('cosine_with_warmup', LambdaLR), - ('cosine_with_warmup_restarts', LambdaLR), - ('polynomial_with_warmup', LambdaLR), - ('constant', ConstantLR), - ('ReduceLROnPlateau', ReduceLROnPlateau), -]) -def test_lr_scheduler_resolver(scheduler_args): - scheduler_name, scheduler_cls = scheduler_args - - model = torch.nn.Linear(10, 5) - optimizer = torch.optim.Adam(model.parameters(), lr=0.01) - - lr_scheduler = lr_scheduler_resolver( - scheduler_name, - optimizer, - num_training_steps=100, - ) - assert isinstance(lr_scheduler, scheduler_cls) diff --git a/pytorch_geometric-2.3.1/test/nn/test_sequential.py b/pytorch_geometric-2.3.1/test/nn/test_sequential.py deleted file mode 100644 index 6f54889..0000000 --- a/pytorch_geometric-2.3.1/test/nn/test_sequential.py +++ /dev/null @@ -1,141 +0,0 @@ -from collections import OrderedDict - -import torch -import torch.fx -from torch.nn import Dropout, Linear, ReLU -from torch_sparse import SparseTensor - -from torch_geometric.nn import ( - GCNConv, - 
JumpingKnowledge, - MessagePassing, - Sequential, - global_mean_pool, -) - - -def test_sequential(): - x = torch.randn(4, 16) - edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - batch = torch.zeros(4, dtype=torch.long) - - model = Sequential('x, edge_index', [ - (GCNConv(16, 64), 'x, edge_index -> x'), - ReLU(inplace=True), - (GCNConv(64, 64), 'x, edge_index -> x'), - ReLU(inplace=True), - Linear(64, 7), - ]) - model.reset_parameters() - - assert len(model) == 5 - assert str(model) == ( - 'Sequential(\n' - ' (0): GCNConv(16, 64)\n' - ' (1): ReLU(inplace=True)\n' - ' (2): GCNConv(64, 64)\n' - ' (3): ReLU(inplace=True)\n' - ' (4): Linear(in_features=64, out_features=7, bias=True)\n' - ')') - - assert isinstance(model[0], GCNConv) - assert isinstance(model[1], ReLU) - assert isinstance(model[2], GCNConv) - assert isinstance(model[3], ReLU) - assert isinstance(model[4], Linear) - - out = model(x, edge_index) - assert out.size() == (4, 7) - - model = Sequential('x, edge_index, batch', [ - (Dropout(p=0.5), 'x -> x'), - (GCNConv(16, 64), 'x, edge_index -> x1'), - ReLU(inplace=True), - (GCNConv(64, 64), 'x1, edge_index -> x2'), - ReLU(inplace=True), - (lambda x1, x2: [x1, x2], 'x1, x2 -> xs'), - (JumpingKnowledge('cat', 64, num_layers=2), 'xs -> x'), - (global_mean_pool, 'x, batch -> x'), - Linear(2 * 64, 7), - ]) - model.reset_parameters() - - out = model(x, edge_index, batch) - assert out.size() == (1, 7) - - -def test_sequential_jittable(): - x = torch.randn(4, 16) - edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - adj_t = SparseTensor(row=edge_index[0], col=edge_index[1]).t() - - model = Sequential('x: Tensor, edge_index: Tensor', [ - (GCNConv(16, 64).jittable(), 'x, edge_index -> x'), - ReLU(inplace=True), - (GCNConv(64, 64).jittable(), 'x, edge_index -> x'), - ReLU(inplace=True), - Linear(64, 7), - ]) - torch.jit.script(model)(x, edge_index) - - model = Sequential('x: Tensor, edge_index: SparseTensor', [ - (GCNConv(16, 64).jittable(), 'x, edge_index -> x'), - ReLU(inplace=True), - (GCNConv(64, 64).jittable(), 'x, edge_index -> x'), - ReLU(inplace=True), - Linear(64, 7), - ]) - torch.jit.script(model)(x, adj_t) - - -def symbolic_trace(module): - class Tracer(torch.fx.Tracer): - def is_leaf_module(self, module, *args, **kwargs) -> bool: - return (isinstance(module, MessagePassing) - or super().is_leaf_module(module, *args, **kwargs)) - - return torch.fx.GraphModule(module, Tracer().trace(module)) - - -def test_sequential_tracable(): - model = Sequential('x, edge_index', [ - (GCNConv(16, 64), 'x, edge_index -> x1'), - ReLU(inplace=True), - (GCNConv(64, 64), 'x1, edge_index -> x2'), - ReLU(inplace=True), - (lambda x1, x2: x1 + x2, 'x1, x2 -> x'), - Linear(64, 7), - ]) - symbolic_trace(model) - - -def test_sequential_with_multiple_return_values(): - x = torch.randn(4, 16) - edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - - model = Sequential('x, edge_index', [ - (GCNConv(16, 32), 'x, edge_index -> x1'), - (GCNConv(32, 64), 'x1, edge_index -> x2'), - (lambda x1, x2: (x1, x2), 'x1, x2 -> x1, x2'), - ]) - - x1, x2 = model(x, edge_index) - assert x1.size() == (4, 32) - assert x2.size() == (4, 64) - - -def test_sequential_with_ordered_dict(): - x = torch.randn(4, 16) - edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - - model = Sequential( - 'x, edge_index', modules=OrderedDict([ - ('conv1', (GCNConv(16, 32), 'x, edge_index -> x')), - ('conv2', (GCNConv(32, 64), 'x, edge_index -> x')), - ])) - - assert 
isinstance(model.conv1, GCNConv) - assert isinstance(model.conv2, GCNConv) - - x = model(x, edge_index) - assert x.size() == (4, 64) diff --git a/pytorch_geometric-2.3.1/test/nn/unpool/test_knn_interpolate.py b/pytorch_geometric-2.3.1/test/nn/unpool/test_knn_interpolate.py deleted file mode 100644 index 734ad83..0000000 --- a/pytorch_geometric-2.3.1/test/nn/unpool/test_knn_interpolate.py +++ /dev/null @@ -1,16 +0,0 @@ -import torch - -from torch_geometric.nn import knn_interpolate -from torch_geometric.testing import withPackage - - -@withPackage('torch_cluster') -def test_knn_interpolate(): - x = torch.Tensor([[1], [10], [100], [-1], [-10], [-100]]) - pos_x = torch.Tensor([[-1, 0], [0, 0], [1, 0], [-2, 0], [0, 0], [2, 0]]) - pos_y = torch.Tensor([[-1, -1], [1, 1], [-2, -2], [2, 2]]) - batch_x = torch.tensor([0, 0, 0, 1, 1, 1]) - batch_y = torch.tensor([0, 0, 1, 1]) - - y = knn_interpolate(x, pos_x, pos_y, batch_x, batch_y, k=2) - assert y.tolist() == [[4], [70], [-4], [-70]] diff --git a/pytorch_geometric-2.3.1/test/profile/test_profile.py b/pytorch_geometric-2.3.1/test/profile/test_profile.py deleted file mode 100644 index d3a2477..0000000 --- a/pytorch_geometric-2.3.1/test/profile/test_profile.py +++ /dev/null @@ -1,99 +0,0 @@ -import os.path -import warnings - -import torch -import torch.nn.functional as F - -from torch_geometric.nn import GraphSAGE -from torch_geometric.profile import ( - get_stats_summary, - profileit, - rename_profile_file, - timeit, -) -from torch_geometric.profile.profile import torch_profile -from torch_geometric.testing import onlyCUDA, onlyLinux, withCUDA, withPackage - - -@withCUDA -@onlyLinux -def test_timeit(device): - x = torch.randn(100, 16, device=device) - lin = torch.nn.Linear(16, 32).to(device) - - with timeit(log=False) as t: - assert not hasattr(t, 'duration') - - with torch.no_grad(): - lin(x) - t.reset() - assert t.duration > 0 - - del t.duration - assert not hasattr(t, 'duration') - assert t.duration > 0 - - -@onlyCUDA -@withPackage('pytorch_memlab') -def test_profileit(get_dataset): - warnings.filterwarnings('ignore', '.*arguments of DataFrame.drop.*') - - dataset = get_dataset(name='Cora') - data = dataset[0].cuda() - model = GraphSAGE(dataset.num_features, hidden_channels=64, num_layers=3, - out_channels=dataset.num_classes).cuda() - optimizer = torch.optim.Adam(model.parameters(), lr=0.01) - - @profileit() - def train(model, x, edge_index, y): - model.train() - optimizer.zero_grad() - out = model(x, edge_index) - loss = F.cross_entropy(out, y) - loss.backward() - return float(loss) - - stats_list = [] - for epoch in range(5): - _, stats = train(model, data.x, data.edge_index, data.y) - assert len(stats) == 6 - assert stats.time > 0 - assert stats.max_allocated_cuda > 0 - assert stats.max_reserved_cuda > 0 - assert stats.max_active_cuda > 0 - assert stats.nvidia_smi_free_cuda > 0 - assert stats.nvidia_smi_used_cuda > 0 - - if epoch >= 2: # Warm-up - stats_list.append(stats) - - stats_summary = get_stats_summary(stats_list) - assert len(stats_summary) == 7 - assert stats_summary.time_mean > 0 - assert stats_summary.time_std > 0 - assert stats_summary.max_allocated_cuda > 0 - assert stats_summary.max_reserved_cuda > 0 - assert stats_summary.max_active_cuda > 0 - assert stats_summary.min_nvidia_smi_free_cuda > 0 - assert stats_summary.max_nvidia_smi_used_cuda > 0 - - -@withCUDA -def test_torch_profile(capfd, get_dataset, device): - dataset = get_dataset(name='Cora') - data = dataset[0].to(device) - model = GraphSAGE(dataset.num_features, 
hidden_channels=64, num_layers=3, - out_channels=dataset.num_classes).to(device) - - with torch_profile(): - model(data.x, data.edge_index) - - out, _ = capfd.readouterr() - assert 'Self CPU time total' in out - if data.x.is_cuda: - assert 'Self CUDA time total' in out - - rename_profile_file('test_profile') - assert os.path.exists('profile-test_profile.json') - os.remove('profile-test_profile.json') diff --git a/pytorch_geometric-2.3.1/test/sampler/test_sampler_base.py b/pytorch_geometric-2.3.1/test/sampler/test_sampler_base.py deleted file mode 100644 index a1edf57..0000000 --- a/pytorch_geometric-2.3.1/test/sampler/test_sampler_base.py +++ /dev/null @@ -1,63 +0,0 @@ -import pytest - -from torch_geometric.sampler.base import NumNeighbors - - -def test_homogeneous_num_neighbors(): - with pytest.raises(ValueError, match="'default' must be set to 'None'"): - num_neighbors = NumNeighbors([25, 10], default=[-1, -1]) - - num_neighbors = NumNeighbors([25, 10]) - assert str(num_neighbors) == 'NumNeighbors(values=[25, 10], default=None)' - - assert num_neighbors.get_values() == [25, 10] - assert num_neighbors.__dict__['_values'] == [25, 10] - assert num_neighbors.get_values() == [25, 10] # Test caching. - - assert num_neighbors.get_mapped_values() == [25, 10] - assert num_neighbors.__dict__['_mapped_values'] == [25, 10] - assert num_neighbors.get_mapped_values() == [25, 10] # Test caching. - - assert num_neighbors.num_hops == 2 - assert num_neighbors.__dict__['_num_hops'] == 2 - assert num_neighbors.num_hops == 2 # Test caching. - - -def test_heterogeneous_num_neighbors_list(): - num_neighbors = NumNeighbors([25, 10]) - - values = num_neighbors.get_values([('A', 'B'), ('B', 'A')]) - assert values == {('A', 'B'): [25, 10], ('B', 'A'): [25, 10]} - - values = num_neighbors.get_mapped_values([('A', 'B'), ('B', 'A')]) - assert values == {'A__to__B': [25, 10], 'B__to__A': [25, 10]} - - assert num_neighbors.num_hops == 2 - - -def test_heterogeneous_num_neighbors_dict_and_default(): - num_neighbors = NumNeighbors({('A', 'B'): [25, 10]}, default=[-1]) - with pytest.raises(ValueError, match="hops must be the same across all"): - values = num_neighbors.get_values([('A', 'B'), ('B', 'A')]) - - num_neighbors = NumNeighbors({('A', 'B'): [25, 10]}, default=[-1, -1]) - - values = num_neighbors.get_values([('A', 'B'), ('B', 'A')]) - assert values == {('A', 'B'): [25, 10], ('B', 'A'): [-1, -1]} - - values = num_neighbors.get_mapped_values([('A', 'B'), ('B', 'A')]) - assert values == {'A__to__B': [25, 10], 'B__to__A': [-1, -1]} - - assert num_neighbors.num_hops == 2 - - -def test_heterogeneous_num_neighbors_empty_dict(): - num_neighbors = NumNeighbors({}, default=[25, 10]) - - values = num_neighbors.get_values([('A', 'B'), ('B', 'A')]) - assert values == {('A', 'B'): [25, 10], ('B', 'A'): [25, 10]} - - values = num_neighbors.get_mapped_values([('A', 'B'), ('B', 'A')]) - assert values == {'A__to__B': [25, 10], 'B__to__A': [25, 10]} - - assert num_neighbors.num_hops == 2 diff --git a/pytorch_geometric-2.3.1/test/transforms/test_add_positional_encoding.py b/pytorch_geometric-2.3.1/test/transforms/test_add_positional_encoding.py deleted file mode 100644 index ab64a34..0000000 --- a/pytorch_geometric-2.3.1/test/transforms/test_add_positional_encoding.py +++ /dev/null @@ -1,93 +0,0 @@ -import copy - -import torch - -from torch_geometric.data import Data -from torch_geometric.transforms import ( - AddLaplacianEigenvectorPE, - AddRandomWalkPE, -) - - -def test_add_laplacian_eigenvector_pe(): - x = torch.randn(6, 4) - 
edge_index = torch.tensor([[0, 1, 0, 4, 1, 4, 2, 3, 3, 5], - [1, 0, 4, 0, 4, 1, 3, 2, 5, 3]]) - data = Data(x=x, edge_index=edge_index) - - transform = AddLaplacianEigenvectorPE(k=3) - assert str(transform) == 'AddLaplacianEigenvectorPE()' - out = transform(copy.copy(data)) - assert out.laplacian_eigenvector_pe.size() == (6, 3) - - transform = AddLaplacianEigenvectorPE(k=3, attr_name=None) - out = transform(copy.copy(data)) - assert out.x.size() == (6, 4 + 3) - - transform = AddLaplacianEigenvectorPE(k=3, attr_name='x') - out = transform(copy.copy(data)) - assert out.x.size() == (6, 3) - - # Output tests: - edge_index = torch.tensor([[0, 1, 0, 4, 1, 4, 2, 3, 3, 5, 2, 5], - [1, 0, 4, 0, 4, 1, 3, 2, 5, 3, 5, 2]]) - data = Data(x=x, edge_index=edge_index) - - transform1 = AddLaplacianEigenvectorPE(k=1, is_undirected=True) - transform2 = AddLaplacianEigenvectorPE(k=1, is_undirected=False) - - # Clustering test with first non-trivial eigenvector (Fiedler vector) - pe = transform1(copy.copy(data)).laplacian_eigenvector_pe - pe_cluster_1 = pe[[0, 1, 4]] - pe_cluster_2 = pe[[2, 3, 5]] - assert not torch.allclose(pe_cluster_1, pe_cluster_2) - assert torch.allclose(pe_cluster_1, pe_cluster_1.mean()) - assert torch.allclose(pe_cluster_2, pe_cluster_2.mean()) - - pe = transform2(copy.copy(data)).laplacian_eigenvector_pe - pe_cluster_1 = pe[[0, 1, 4]] - pe_cluster_2 = pe[[2, 3, 5]] - assert not torch.allclose(pe_cluster_1, pe_cluster_2) - assert torch.allclose(pe_cluster_1, pe_cluster_1.mean()) - assert torch.allclose(pe_cluster_2, pe_cluster_2.mean()) - - -def test_add_random_walk_pe(): - x = torch.randn(6, 4) - edge_index = torch.tensor([[0, 1, 0, 4, 1, 4, 2, 3, 3, 5], - [1, 0, 4, 0, 4, 1, 3, 2, 5, 3]]) - data = Data(x=x, edge_index=edge_index) - - transform = AddRandomWalkPE(walk_length=3) - assert str(transform) == 'AddRandomWalkPE()' - out = transform(copy.copy(data)) - assert out.random_walk_pe.size() == (6, 3) - - transform = AddRandomWalkPE(walk_length=3, attr_name=None) - out = transform(copy.copy(data)) - assert out.x.size() == (6, 4 + 3) - - transform = AddRandomWalkPE(walk_length=3, attr_name='x') - out = transform(copy.copy(data)) - assert out.x.size() == (6, 3) - - # Output tests: - assert out.x.tolist() == [ - [0.0, 0.5, 0.25], - [0.0, 0.5, 0.25], - [0.0, 0.5, 0.00], - [0.0, 1.0, 0.00], - [0.0, 0.5, 0.25], - [0.0, 0.5, 0.00], - ] - - edge_index = torch.tensor([[0, 1, 2], [0, 1, 2]]) - data = Data(edge_index=edge_index, num_nodes=4) - out = transform(copy.copy(data)) - - assert out.x.tolist() == [ - [1.0, 1.0, 1.0], - [1.0, 1.0, 1.0], - [1.0, 1.0, 1.0], - [0.0, 0.0, 0.0], - ] diff --git a/pytorch_geometric-2.3.1/test/transforms/test_cartesian.py b/pytorch_geometric-2.3.1/test/transforms/test_cartesian.py deleted file mode 100644 index 9b66f6d..0000000 --- a/pytorch_geometric-2.3.1/test/transforms/test_cartesian.py +++ /dev/null @@ -1,27 +0,0 @@ -import torch - -from torch_geometric.data import Data -from torch_geometric.transforms import Cartesian - - -def test_cartesian(): - assert str(Cartesian()) == 'Cartesian(norm=True, max_value=None)' - - pos = torch.Tensor([[-1, 0], [0, 0], [2, 0]]) - edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) - edge_attr = torch.Tensor([1, 2, 3, 4]) - - data = Data(edge_index=edge_index, pos=pos) - data = Cartesian(norm=False)(data) - assert len(data) == 3 - assert data.pos.tolist() == pos.tolist() - assert data.edge_index.tolist() == edge_index.tolist() - assert data.edge_attr.tolist() == [[-1, 0], [1, 0], [-2, 0], [2, 0]] - - data = 
Data(edge_index=edge_index, pos=pos, edge_attr=edge_attr) - data = Cartesian(norm=True)(data) - assert len(data) == 3 - assert data.pos.tolist() == pos.tolist() - assert data.edge_index.tolist() == edge_index.tolist() - assert data.edge_attr.tolist() == [[1, 0.25, 0.5], [2, 0.75, 0.5], - [3, 0, 0.5], [4, 1, 0.5]] diff --git a/pytorch_geometric-2.3.1/test/transforms/test_compose.py b/pytorch_geometric-2.3.1/test/transforms/test_compose.py deleted file mode 100644 index fe086a0..0000000 --- a/pytorch_geometric-2.3.1/test/transforms/test_compose.py +++ /dev/null @@ -1,21 +0,0 @@ -import torch - -import torch_geometric.transforms as T -from torch_geometric.data import Data - - -def test_compose(): - transform = T.Compose([T.Center(), T.AddSelfLoops()]) - assert str(transform) == ('Compose([\n' - ' Center(),\n' - ' AddSelfLoops()\n' - '])') - - pos = torch.Tensor([[0, 0], [2, 0], [4, 0]]) - edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) - - data = Data(edge_index=edge_index, pos=pos) - data = transform(data) - assert len(data) == 2 - assert data.pos.tolist() == [[-2, 0], [0, 0], [2, 0]] - assert data.edge_index.size() == (2, 7) diff --git a/pytorch_geometric-2.3.1/test/transforms/test_distance.py b/pytorch_geometric-2.3.1/test/transforms/test_distance.py deleted file mode 100644 index 1f4537b..0000000 --- a/pytorch_geometric-2.3.1/test/transforms/test_distance.py +++ /dev/null @@ -1,26 +0,0 @@ -import torch - -from torch_geometric.data import Data -from torch_geometric.transforms import Distance - - -def test_distance(): - assert str(Distance()) == 'Distance(norm=True, max_value=None)' - - pos = torch.Tensor([[-1, 0], [0, 0], [2, 0]]) - edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) - edge_attr = torch.Tensor([1, 1, 1, 1]) - - data = Data(edge_index=edge_index, pos=pos) - data = Distance(norm=False)(data) - assert len(data) == 3 - assert data.pos.tolist() == pos.tolist() - assert data.edge_index.tolist() == edge_index.tolist() - assert data.edge_attr.tolist() == [[1], [1], [2], [2]] - - data = Data(edge_index=edge_index, pos=pos, edge_attr=edge_attr) - data = Distance(norm=True)(data) - assert len(data) == 3 - assert data.pos.tolist() == pos.tolist() - assert data.edge_index.tolist() == edge_index.tolist() - assert data.edge_attr.tolist() == [[1, 0.5], [1, 0.5], [1, 1], [1, 1]] diff --git a/pytorch_geometric-2.3.1/test/transforms/test_gcn_norm.py b/pytorch_geometric-2.3.1/test/transforms/test_gcn_norm.py deleted file mode 100644 index 9eb2bd5..0000000 --- a/pytorch_geometric-2.3.1/test/transforms/test_gcn_norm.py +++ /dev/null @@ -1,45 +0,0 @@ -import torch -from torch_sparse import SparseTensor - -from torch_geometric.data import Data -from torch_geometric.transforms import GCNNorm - - -def test_gcn_norm(): - edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) - edge_weight = torch.ones(edge_index.size(1)) - adj_t = SparseTensor.from_edge_index(edge_index, edge_weight).t() - - transform = GCNNorm() - assert str(transform) == 'GCNNorm(add_self_loops=True)' - - expected_edge_index = [[0, 1, 1, 2, 0, 1, 2], [1, 0, 2, 1, 0, 1, 2]] - expected_edge_weight = torch.tensor( - [0.4082, 0.4082, 0.4082, 0.4082, 0.5000, 0.3333, 0.5000]) - - data = Data(edge_index=edge_index, edge_weight=edge_weight, num_nodes=3) - data = transform(data) - assert len(data) == 3 - assert data.num_nodes == 3 - assert data.edge_index.tolist() == expected_edge_index - assert torch.allclose(data.edge_weight, expected_edge_weight, atol=1e-4) - - data = Data(edge_index=edge_index, num_nodes=3) - data = 
transform(data) - assert len(data) == 3 - assert data.num_nodes == 3 - assert data.edge_index.tolist() == expected_edge_index - assert torch.allclose(data.edge_weight, expected_edge_weight, atol=1e-4) - - # For `SparseTensor`, expected outputs will be sorted: - expected_edge_index = [[0, 0, 1, 1, 1, 2, 2], [0, 1, 0, 1, 2, 1, 2]] - expected_edge_weight = torch.tensor( - [0.500, 0.4082, 0.4082, 0.3333, 0.4082, 0.4082, 0.5000]) - - data = Data(adj_t=adj_t) - data = transform(data) - assert len(data) == 1 - row, col, value = data.adj_t.coo() - assert row.tolist() == expected_edge_index[0] - assert col.tolist() == expected_edge_index[1] - assert torch.allclose(value, expected_edge_weight, atol=1e-4) diff --git a/pytorch_geometric-2.3.1/test/transforms/test_generate_normals.py b/pytorch_geometric-2.3.1/test/transforms/test_generate_normals.py deleted file mode 100644 index a426c19..0000000 --- a/pytorch_geometric-2.3.1/test/transforms/test_generate_normals.py +++ /dev/null @@ -1,29 +0,0 @@ -import torch - -from torch_geometric.data import Data -from torch_geometric.transforms import GenerateMeshNormals - - -def test_generate_normals(): - transform = GenerateMeshNormals() - assert str(transform) == 'GenerateMeshNormals()' - - pos = torch.Tensor([ - [0, 0, 0], - [-2, 1, 0], - [-1, 1, 0], - [0, 1, 0], - [1, 1, 0], - [2, 1, 0], - ]) - face = torch.tensor([ - [0, 0, 0, 0], - [1, 2, 3, 4], - [2, 3, 4, 5], - ]) - - data = transform(Data(pos=pos, face=face)) - assert len(data) == 3 - assert data.pos.tolist() == pos.tolist() - assert data.face.tolist() == face.tolist() - assert data.norm.tolist() == [[0, 0, -1]] * 6 diff --git a/pytorch_geometric-2.3.1/test/transforms/test_normalize_rotation.py b/pytorch_geometric-2.3.1/test/transforms/test_normalize_rotation.py deleted file mode 100644 index cc19236..0000000 --- a/pytorch_geometric-2.3.1/test/transforms/test_normalize_rotation.py +++ /dev/null @@ -1,45 +0,0 @@ -from math import sqrt - -import torch - -from torch_geometric.data import Data -from torch_geometric.transforms import NormalizeRotation - - -def test_normalize_rotation(): - assert str(NormalizeRotation()) == 'NormalizeRotation()' - - pos = torch.Tensor([[-2, -2], [-1, -1], [0, 0], [1, 1], [2, 2]]) - normal = torch.Tensor([[-1, 1], [-1, 1], [-1, 1], [-1, 1], [-1, 1]]) - data = Data(pos=pos) - data.normal = normal - data = NormalizeRotation()(data) - assert len(data) == 2 - - expected_pos = torch.Tensor([ - [-2 * sqrt(2), 0], - [-sqrt(2), 0], - [0, 0], - [sqrt(2), 0], - [2 * sqrt(2), 0], - ]) - expected_normal = [[0, 1], [0, 1], [0, 1], [0, 1], [0, 1]] - - assert torch.allclose(data.pos, expected_pos, atol=1e-04) - assert data.normal.tolist() == expected_normal - - data = Data(pos=pos) - data.normal = normal - data = NormalizeRotation(max_points=3)(data) - assert len(data) == 2 - - assert torch.allclose(data.pos, expected_pos, atol=1e-04) - assert data.normal.tolist() == expected_normal - - data = Data(pos=pos) - data.normal = normal - data = NormalizeRotation(sort=True)(data) - assert len(data) == 2 - - assert torch.allclose(data.pos, expected_pos, atol=1e-04) - assert data.normal.tolist() == expected_normal diff --git a/pytorch_geometric-2.3.1/test/transforms/test_one_hot_degree.py b/pytorch_geometric-2.3.1/test/transforms/test_one_hot_degree.py deleted file mode 100644 index 972ce03..0000000 --- a/pytorch_geometric-2.3.1/test/transforms/test_one_hot_degree.py +++ /dev/null @@ -1,26 +0,0 @@ -import torch - -from torch_geometric.data import Data -from torch_geometric.transforms import 
OneHotDegree - - -def test_one_hot_degree(): - assert str(OneHotDegree(max_degree=3)) == 'OneHotDegree(3)' - - edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]]) - x = torch.Tensor([1, 1, 1, 1]) - - data = Data(edge_index=edge_index, num_nodes=4) - data = OneHotDegree(max_degree=3)(data) - assert len(data) == 3 - assert data.edge_index.tolist() == edge_index.tolist() - assert data.x.tolist() == [[0, 0, 0, 1], [0, 1, 0, 0], [0, 1, 0, 0], - [0, 1, 0, 0]] - assert data.num_nodes == 4 - - data = Data(edge_index=edge_index, x=x) - data = OneHotDegree(max_degree=3)(data) - assert len(data) == 2 - assert data.edge_index.tolist() == edge_index.tolist() - assert data.x.tolist() == [[1, 0, 0, 0, 1], [1, 0, 1, 0, 0], - [1, 0, 1, 0, 0], [1, 0, 1, 0, 0]] diff --git a/pytorch_geometric-2.3.1/test/transforms/test_pad.py b/pytorch_geometric-2.3.1/test/transforms/test_pad.py deleted file mode 100644 index e1fea40..0000000 --- a/pytorch_geometric-2.3.1/test/transforms/test_pad.py +++ /dev/null @@ -1,537 +0,0 @@ -import numbers -from copy import deepcopy -from typing import Dict, Generator, List, Optional, Tuple, Union - -import pytest -import torch - -from torch_geometric.data import Data, HeteroData -from torch_geometric.datasets import FakeDataset, FakeHeteroDataset -from torch_geometric.transforms import Pad -from torch_geometric.transforms.pad import ( - AttrNamePadding, - EdgeTypePadding, - NodeTypePadding, - Padding, - UniformPadding, -) -from torch_geometric.typing import EdgeType, NodeType - - -def fake_data() -> Data: - return FakeDataset(avg_num_nodes=10, avg_degree=5, edge_dim=2)[0] - - -def fake_hetero_data(node_types=2, edge_types=5) -> HeteroData: - return FakeHeteroDataset(num_node_types=node_types, - num_edge_types=edge_types, avg_num_nodes=10, - edge_dim=2)[0] - - -def _generate_homodata_node_attrs(data: Data) -> Generator[str, None, None]: - for attr in data.keys: - if data.is_node_attr(attr): - yield attr - - -def _generate_homodata_edge_attrs(data: Data) -> Generator[str, None, None]: - for attr in data.keys: - if data.is_edge_attr(attr): - yield attr - - -def _generate_heterodata_nodes( - data: HeteroData -) -> Generator[Tuple[NodeType, str, torch.Tensor], None, None]: - for node_type, store in data.node_items(): - for attr in store.keys(): - yield node_type, attr - - -def _generate_heterodata_edges( - data: HeteroData -) -> Generator[Tuple[EdgeType, str, torch.Tensor], None, None]: - for edge_type, store in data.edge_items(): - for attr in store.keys(): - yield edge_type, attr - - -def _check_homo_data_nodes(original: Data, padded: Data, - max_num_nodes: Union[int, Dict[NodeType, int]], - node_pad_value: Optional[Padding] = None, - exclude_keys: Optional[List[str]] = None): - assert padded.num_nodes == max_num_nodes - - for attr in _generate_homodata_node_attrs(original): - if attr in exclude_keys: - assert attr not in padded.keys - continue - - assert attr in padded.keys - - if not isinstance(padded[attr], torch.Tensor): - continue - - assert padded[attr].shape[0] == max_num_nodes - compare_pad_start_idx = original[attr].shape[0] - # Check values in padded area. - pad_value = node_pad_value.get_value( - None, attr) if node_pad_value is not None else 0.0 - assert all( - i == pad_value - for i in torch.flatten(padded[attr][compare_pad_start_idx:])) - - # Check values in non-padded area. 
-        assert torch.equal(original[attr],
-                           padded[attr][:compare_pad_start_idx])
-
-
-def _check_homo_data_edges(original: Data, padded: Data,
-                           max_num_edges: Optional[int] = None,
-                           edge_pad_value: Optional[Padding] = None,
-                           exclude_keys: Optional[List[str]] = None):
-    # Check edge index attribute.
-    if max_num_edges is None:
-        max_num_edges = padded.num_nodes**2
-    assert padded.num_edges == max_num_edges
-    assert padded.edge_index.shape[1] == max_num_edges
-
-    compare_pad_start_idx = original.num_edges
-    expected_node = original.num_nodes
-
-    # Check values in padded area.
-    assert all(
-        padded.edge_index[0, i] == padded.edge_index[1, i] == expected_node
-        for i in range(compare_pad_start_idx, max_num_edges))
-    # Check values in non-padded area.
-    assert torch.equal(original.edge_index,
-                       padded.edge_index[:, :compare_pad_start_idx])
-
-    # Check other attributes.
-    for attr in _generate_homodata_edge_attrs(original):
-        if attr == 'edge_index':
-            continue
-        if attr in exclude_keys:
-            assert attr not in padded.keys
-            continue
-
-        assert attr in padded.keys
-
-        if not isinstance(padded[attr], torch.Tensor):
-            continue
-
-        assert padded[attr].shape[0] == max_num_edges
-
-        # Check values in padded area.
-        pad_value = edge_pad_value.get_value(
-            None, attr) if edge_pad_value is not None else 0.0
-        assert all(
-            i == pad_value
-            for i in torch.flatten(padded[attr][compare_pad_start_idx:, :]))
-
-        # Check values in non-padded area.
-        assert torch.equal(original[attr],
-                           padded[attr][:compare_pad_start_idx, :])
-
-
-def _check_hetero_data_nodes(original: HeteroData, padded: HeteroData,
-                             max_num_nodes: Union[int, Dict[NodeType, int]],
-                             node_pad_value: Optional[Padding] = None,
-                             exclude_keys: Optional[List[str]] = None):
-
-    expected_nodes = max_num_nodes
-
-    for node_type, attr in _generate_heterodata_nodes(original):
-        if attr in exclude_keys:
-            assert attr not in padded[node_type].keys()
-            continue
-
-        assert attr in padded[node_type].keys()
-
-        if not isinstance(padded[node_type][attr], torch.Tensor):
-            continue
-
-        original_tensor = original[node_type][attr]
-        padded_tensor = padded[node_type][attr]
-
-        # Check the number of nodes.
-        if isinstance(max_num_nodes, dict):
-            expected_nodes = max_num_nodes[node_type]
-
-        assert padded_tensor.shape[0] == expected_nodes
-
-        compare_pad_start_idx = original_tensor.shape[0]
-        pad_value = node_pad_value.get_value(
-            node_type, attr) if node_pad_value is not None else 0.0
-        assert all(
-            i == pad_value
-            for i in torch.flatten(padded_tensor[compare_pad_start_idx:]))
-        # Compare non-padded area with the original.
- assert torch.equal(original_tensor, - padded_tensor[:compare_pad_start_idx]) - - -def _check_hetero_data_edges(original: HeteroData, padded: HeteroData, - max_num_edges: Optional[Union[int, - Dict[EdgeType, - int]]] = None, - edge_pad_value: Optional[Padding] = None, - exclude_keys: Optional[List[str]] = None): - - for edge_type, attr in _generate_heterodata_edges(padded): - if attr in exclude_keys: - assert attr not in padded[edge_type].keys() - continue - - assert attr in padded[edge_type].keys() - - if not isinstance(padded[edge_type][attr], torch.Tensor): - continue - - compare_pad_start_idx = original[edge_type].num_edges - original_tensor = original[edge_type][attr] - padded_tensor = padded[edge_type][attr] - - if isinstance(max_num_edges, numbers.Number): - expected_num_edges = max_num_edges - elif max_num_edges is None or edge_type not in max_num_edges.keys(): - v1, _, v2 = edge_type - expected_num_edges = padded[v1].num_nodes * padded[v2].num_nodes - else: - expected_num_edges = max_num_edges[edge_type] - - if attr == 'edge_index': - # Check the number of edges. - assert padded_tensor.shape[1] == expected_num_edges - - # Check padded area values. - src_nodes = original[edge_type[0]].num_nodes - assert all( - i == src_nodes - for i in torch.flatten(padded_tensor[0, - compare_pad_start_idx:])) - dst_nodes = original[edge_type[2]].num_nodes - assert all( - i == dst_nodes - for i in torch.flatten(padded_tensor[1, - compare_pad_start_idx:])) - - # Compare non-padded area with the original. - assert torch.equal(original_tensor, - padded_tensor[:, :compare_pad_start_idx]) - else: - # Check padded area size. - assert padded_tensor.shape[0] == expected_num_edges - - # Check padded area values. - pad_value = edge_pad_value.get_value( - edge_type, attr) if edge_pad_value is not None else 0.0 - assert all(i == pad_value for i in torch.flatten(padded_tensor[ - compare_pad_start_idx:, :])) - - # Compare non-padded area with the original. 
- assert torch.equal(original_tensor, - padded_tensor[:compare_pad_start_idx, :]) - - -def _check_data(original: Union[Data, HeteroData], padded: Union[Data, - HeteroData], - max_num_nodes: Union[int, Dict[NodeType, int]], - max_num_edges: Optional[Union[int, Dict[EdgeType, - int]]] = None, - node_pad_value: Optional[Union[Padding, int, float]] = None, - edge_pad_value: Optional[Union[Padding, int, float]] = None, - exclude_keys: Optional[List[str]] = None): - - if not isinstance(node_pad_value, Padding) and node_pad_value is not None: - node_pad_value = UniformPadding(node_pad_value) - if not isinstance(edge_pad_value, Padding) and edge_pad_value is not None: - edge_pad_value = UniformPadding(edge_pad_value) - - if exclude_keys is None: - exclude_keys = [] - - if isinstance(original, Data): - _check_homo_data_nodes(original, padded, max_num_nodes, node_pad_value, - exclude_keys) - _check_homo_data_edges(original, padded, max_num_edges, edge_pad_value, - exclude_keys) - else: - _check_hetero_data_nodes(original, padded, max_num_nodes, - node_pad_value, exclude_keys) - _check_hetero_data_edges(original, padded, max_num_edges, - edge_pad_value, exclude_keys) - - -def test_pad_repr(): - pad_str = 'Pad(max_num_nodes=10, max_num_edges=15, ' \ - 'node_pad_value=UniformPadding(value=3.0), ' \ - 'edge_pad_value=UniformPadding(value=1.5))' - assert str(eval(pad_str)) == pad_str - - -@pytest.mark.parametrize('data', [fake_data(), fake_hetero_data()]) -@pytest.mark.parametrize('num_nodes', [32, 64]) -def test_pad_auto_edges(data, num_nodes): - original = data - data = deepcopy(data) - transform = Pad(max_num_nodes=num_nodes) - - padded = transform(data) - _check_data(original, padded, num_nodes) - - -@pytest.mark.parametrize('num_nodes', [32, 64]) -@pytest.mark.parametrize('num_edges', [300, 411]) -def test_pad_data_explicit_edges(num_nodes, num_edges): - data = fake_data() - original = deepcopy(data) - transform = Pad(max_num_nodes=num_nodes, max_num_edges=num_edges) - - padded = transform(data) - _check_data(original, padded, num_nodes, num_edges) - - -@pytest.mark.parametrize('num_nodes', [32, {'v0': 64, 'v1': 36}]) -@pytest.mark.parametrize('num_edges', [300, {('v0', 'e0', 'v1'): 397}]) -def test_pad_heterodata_explicit_edges(num_nodes, num_edges): - data = fake_hetero_data() - original = deepcopy(data) - transform = Pad(max_num_nodes=num_nodes, max_num_edges=num_edges) - - padded = transform(data) - _check_data(original, padded, num_nodes, num_edges) - - -@pytest.mark.parametrize('node_pad_value', [10, AttrNamePadding({'x': 3.0})]) -@pytest.mark.parametrize('edge_pad_value', - [11, AttrNamePadding({'edge_attr': 2.0})]) -def test_pad_data_pad_values(node_pad_value, edge_pad_value): - data = fake_data() - original = deepcopy(data) - num_nodes = 32 - transform = Pad(max_num_nodes=num_nodes, node_pad_value=node_pad_value, - edge_pad_value=edge_pad_value) - padded = transform(data) - _check_data(original, padded, num_nodes, node_pad_value=node_pad_value, - edge_pad_value=edge_pad_value) - - -@pytest.mark.parametrize('node_pad_value', [ - UniformPadding(12), - AttrNamePadding({'x': 0}), - NodeTypePadding({ - 'v0': UniformPadding(12), - 'v1': AttrNamePadding({'x': 7}) - }) -]) -@pytest.mark.parametrize('edge_pad_value', [ - UniformPadding(13), - EdgeTypePadding({ - ('v0', 'e0', 'v1'): - UniformPadding(13), - ('v1', 'e0', 'v0'): - AttrNamePadding({'edge_attr': UniformPadding(-1.0)}) - }) -]) -def test_pad_heterodata_pad_values(node_pad_value, edge_pad_value): - data = fake_hetero_data() - original = 
deepcopy(data) - num_nodes = 32 - transform = Pad(max_num_nodes=num_nodes, node_pad_value=node_pad_value, - edge_pad_value=edge_pad_value) - - padded = transform(data) - _check_data(original, padded, num_nodes, node_pad_value=node_pad_value, - edge_pad_value=edge_pad_value) - - -@pytest.mark.parametrize('data', [fake_data(), fake_hetero_data()]) -@pytest.mark.parametrize('exclude_keys', - [['y'], ['edge_attr'], ['y', 'edge_attr']]) -def test_pad_data_exclude_keys(data, exclude_keys): - original = data - data = deepcopy(data) - num_nodes = 32 - transform = Pad(max_num_nodes=num_nodes, exclude_keys=exclude_keys) - - padded = transform(data) - _check_data(original, padded, num_nodes, exclude_keys=exclude_keys) - - -@pytest.mark.parametrize('data', [fake_data(), fake_hetero_data(node_types=1)]) -def test_pad_invalid_max_num_nodes(data): - transform = Pad(max_num_nodes=data.num_nodes - 1) - - with pytest.raises(AssertionError, - match='The number of nodes after padding'): - transform(data) - - -@pytest.mark.parametrize( - 'data', - [fake_data(), fake_hetero_data(node_types=1, edge_types=1)]) -def test_pad_invalid_max_num_edges(data): - transform = Pad(max_num_nodes=data.num_nodes + 10, - max_num_edges=data.num_edges - 1) - - with pytest.raises(AssertionError, - match='The number of edges after padding'): - transform(data) - - -def test_pad_num_nodes_not_complete(): - data = fake_hetero_data(node_types=2, edge_types=1) - transform = Pad(max_num_nodes={'v0': 100}) - - with pytest.raises(AssertionError, match='The number of v1 nodes'): - transform(data) - - -def test_pad_invalid_padding_type(): - with pytest.raises(ValueError, match="to be an integer or float"): - Pad(max_num_nodes=100, node_pad_value='somestring') - with pytest.raises(ValueError, match="to be an integer or float"): - Pad(max_num_nodes=100, edge_pad_value='somestring') - - -def test_pad_data_non_tensor_attr(): - data = deepcopy(fake_data()) - batch_size = 13 - data.batch_size = batch_size - - transform = Pad(max_num_nodes=100) - padded = transform(data) - assert padded.batch_size == batch_size - - exclude_transform = Pad(max_num_nodes=101, exclude_keys=('batch_size', )) - padded = exclude_transform(data) - assert 'batch_size' not in padded.keys - - -@pytest.mark.parametrize('mask_pad_value', [True, False]) -def test_pad_node_additional_attr_mask(mask_pad_value): - data = fake_data() - mask = torch.randn(data.num_nodes) > 0 - mask_names = ['train_mask', 'test_mask', 'val_mask'] - for mask_name in mask_names: - setattr(data, mask_name, mask) - padding_num = 20 - - max_num_nodes = int(data.num_nodes) + padding_num - max_num_edges = data.num_edges + padding_num - - transform = Pad(max_num_nodes, max_num_edges, node_pad_value=0.1, - mask_pad_value=mask_pad_value) - padded = transform(data) - padded_masks = [getattr(padded, mask_name) for mask_name in mask_names] - - for padded_mask in padded_masks: - assert padded_mask.ndim == 1 - assert padded_mask.size()[0] == max_num_nodes - assert torch.all(padded_mask[-padding_num:] == mask_pad_value) - - -def test_uniform_padding(): - pad_val = 10.0 - p = UniformPadding(pad_val) - assert p.get_value() == pad_val - assert p.get_value("v1", "x") == pad_val - - p = UniformPadding() - assert p.get_value() == 0.0 - - with pytest.raises(ValueError, match="to be an integer or float"): - UniformPadding('') - - -def test_attr_name_padding(): - x_val = 10.0 - y_val = 15.0 - default = 3.0 - padding_dict = {'x': x_val, 'y': UniformPadding(y_val)} - padding = AttrNamePadding(padding_dict, default=default) - - 
assert padding.get_value(attr_name='x') == x_val - assert padding.get_value('v1', 'x') == x_val - assert padding.get_value(attr_name='y') == y_val - assert padding.get_value('v1', 'y') == y_val - assert padding.get_value(attr_name='x2') == default - - padding = AttrNamePadding({}) - assert padding.get_value(attr_name='x') == 0.0 - - -def test_attr_name_padding_invalid(): - with pytest.raises(ValueError, match="to be a dictionary"): - AttrNamePadding(10.0) - - with pytest.raises(ValueError, match="to be a string"): - AttrNamePadding({10: 10.0}) - - with pytest.raises(ValueError, match="to be of type"): - AttrNamePadding({"x": {}}) - - node_type_padding = NodeTypePadding({"x": 10.0}) - with pytest.raises(ValueError, match="to be of type"): - AttrNamePadding({'x': node_type_padding}) - - -@pytest.mark.parametrize('store_type', ['node', 'edge']) -def test_node_edge_type_padding(store_type): - if store_type == "node": - stores = ['v1', 'v2', 'v3', 'v4'] - padding_cls = NodeTypePadding - else: - stores = [('v1', 'e1', 'v1'), ('v1', 'e2', 'v1'), ('v1', 'e1', 'v2'), - ('v2', 'e1', 'v1')] - padding_cls = EdgeTypePadding - - s0_default = 3.0 - s0_padding_dict = {'x': 10.0, 'y': -12.0} - s0_padding = AttrNamePadding(s0_padding_dict, s0_default) - s1_default = 0.1 - s1_padding_dict = {'y': 0.0, 'p': 13.0} - s1_padding = AttrNamePadding(s1_padding_dict, s1_default) - - s2_default = 7.5 - store_default = -11.0 - padding_dict = { - stores[0]: s0_padding, - stores[1]: s1_padding, - stores[2]: s2_default - } - padding = padding_cls(padding_dict, store_default) - - assert padding.get_value(stores[0], 'x') == s0_padding_dict['x'] - assert padding.get_value(stores[0], 'y') == s0_padding_dict['y'] - assert padding.get_value(stores[0], 'p') == s0_default - assert padding.get_value(stores[0], 'z') == s0_default - - assert padding.get_value(stores[1], 'x') == s1_default - assert padding.get_value(stores[1], 'y') == s1_padding_dict['y'] - assert padding.get_value(stores[1], 'p') == s1_padding_dict['p'] - assert padding.get_value(stores[1], 'z') == s1_default - - assert padding.get_value(stores[2], 'x') == s2_default - assert padding.get_value(stores[2], 'z') == s2_default - - assert padding.get_value(stores[3], 'x') == store_default - - -def test_edge_padding_invalid(): - with pytest.raises(ValueError, match="to be a tuple"): - EdgeTypePadding({'v1': 10.0}) - - with pytest.raises(ValueError, match="got 1"): - EdgeTypePadding({('v1', ): 10.0}) - - with pytest.raises(ValueError, match="got 2"): - EdgeTypePadding({('v1', 'v2'): 10.0}) - - with pytest.raises(ValueError, match="got 4"): - EdgeTypePadding({('v1', 'e2', 'v1', 'v2'): 10.0})
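For orientation while reading the deleted `test_pad.py` hunks above, here is a minimal sketch of how the `Pad` transform they exercise is typically invoked. It assumes `Pad` behaves exactly as the assertions above describe, and that `AttrNamePadding` is importable from `torch_geometric.transforms.pad` (the import block of the deleted file is not visible in this diff):

```python
import torch

from torch_geometric.data import Data
from torch_geometric.transforms import Pad
from torch_geometric.transforms.pad import AttrNamePadding  # path assumed

# A tiny graph: 3 nodes with 4 features each and 2 directed edges.
data = Data(
    x=torch.randn(3, 4),
    edge_index=torch.tensor([[0, 1], [1, 2]]),
    edge_attr=torch.randn(2, 2),
)

# Pad to 5 nodes and 8 edges: 'x' is padded with 1.0, while attributes
# without an explicit entry fall back to the default pad value of 0.0.
transform = Pad(max_num_nodes=5, max_num_edges=8,
                node_pad_value=AttrNamePadding({'x': 1.0}))
padded = transform(data)

assert padded.num_nodes == 5
assert padded.edge_index.size(1) == 8  # fake edges point at a pad node
```

Padded edges are attached to the first padding node, which is why the checks above compare `padded.edge_index` in the padded region against `original.num_nodes`.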
diff --git a/pytorch_geometric-2.3.1/test/transforms/test_point_pair_features.py b/pytorch_geometric-2.3.1/test/transforms/test_point_pair_features.py deleted file mode 100644 index bb79af6..0000000 --- a/pytorch_geometric-2.3.1/test/transforms/test_point_pair_features.py +++ /dev/null @@ -1,36 +0,0 @@ -from math import pi as PI - -import torch - -from torch_geometric.data import Data -from torch_geometric.transforms import PointPairFeatures - - -def test_point_pair_features(): - transform = PointPairFeatures() - assert str(transform) == 'PointPairFeatures()' - - pos = torch.Tensor([[0, 0, 0], [1, 0, 0]]) - edge_index = torch.tensor([[0, 1], [1, 0]]) - norm = torch.Tensor([[1, 0, 0], [1, 0, 0]]) - edge_attr = torch.Tensor([1, 1]) - data = Data(edge_index=edge_index, pos=pos, norm=norm) - - data = transform(data) - assert len(data) == 4 - assert data.pos.tolist() == pos.tolist() - assert data.norm.tolist() == norm.tolist() - assert data.edge_index.tolist() == edge_index.tolist() - assert torch.allclose(data.edge_attr, - torch.Tensor([[1, 0, 0, 0], [1, PI, PI, 0]]), - atol=1e-04) - - data = Data(edge_index=edge_index, pos=pos, norm=norm, edge_attr=edge_attr) - data = transform(data) - assert len(data) == 4 - assert data.pos.tolist() == pos.tolist() - assert data.norm.tolist() == norm.tolist() - assert data.edge_index.tolist() == edge_index.tolist() - assert torch.allclose(data.edge_attr, - torch.Tensor([[1, 1, 0, 0, 0], [1, 1, PI, PI, 0]]), - atol=1e-04) diff --git a/pytorch_geometric-2.3.1/test/transforms/test_polar.py b/pytorch_geometric-2.3.1/test/transforms/test_polar.py deleted file mode 100644 index 4fe9856..0000000 --- a/pytorch_geometric-2.3.1/test/transforms/test_polar.py +++ /dev/null @@ -1,30 +0,0 @@ -from math import pi as PI - -import torch - -from torch_geometric.data import Data -from torch_geometric.transforms import Polar - - -def test_polar(): - assert str(Polar()) == 'Polar(norm=True, max_value=None)' - - pos = torch.Tensor([[0, 0], [1, 0]]) - edge_index = torch.tensor([[0, 1], [1, 0]]) - edge_attr = torch.Tensor([1, 1]) - - data = Data(edge_index=edge_index, pos=pos) - data = Polar(norm=False)(data) - assert len(data) == 3 - assert data.pos.tolist() == pos.tolist() - assert data.edge_index.tolist() == edge_index.tolist() - assert torch.allclose(data.edge_attr, torch.Tensor([[1, 0], [1, PI]]), - atol=1e-04) - - data = Data(edge_index=edge_index, pos=pos, edge_attr=edge_attr) - data = Polar(norm=True)(data) - assert len(data) == 3 - assert data.pos.tolist() == pos.tolist() - assert data.edge_index.tolist() == edge_index.tolist() - assert torch.allclose(data.edge_attr, - torch.Tensor([[1, 1, 0], [1, 1, 0.5]]), atol=1e-04) diff --git a/pytorch_geometric-2.3.1/test/transforms/test_random_flip.py b/pytorch_geometric-2.3.1/test/transforms/test_random_flip.py deleted file mode 100644 index f32ed9d..0000000 --- a/pytorch_geometric-2.3.1/test/transforms/test_random_flip.py +++ /dev/null @@ -1,20 +0,0 @@ -import torch - -from torch_geometric.data import Data -from torch_geometric.transforms import RandomFlip - - -def test_random_flip(): - assert str(RandomFlip(axis=0)) == 'RandomFlip(axis=0, p=0.5)' - - pos = torch.Tensor([[-1, 1], [-3, 0], [2, -1]]) - - data = Data(pos=pos) - data = RandomFlip(axis=0, p=1)(data) - assert len(data) == 1 - assert data.pos.tolist() == [[1, 1], [3, 0], [-2, -1]] - - data = Data(pos=pos) - data = RandomFlip(axis=1, p=1)(data) - assert len(data) == 1 - assert data.pos.tolist() == [[-1, -1], [-3, 0], [2, 1]] diff --git a/pytorch_geometric-2.3.1/test/transforms/test_random_jitter.py b/pytorch_geometric-2.3.1/test/transforms/test_random_jitter.py deleted file mode 100644 index 17753fe..0000000 --- a/pytorch_geometric-2.3.1/test/transforms/test_random_jitter.py +++ /dev/null @@ -1,29 +0,0 @@ -import torch - -from torch_geometric.data import Data -from torch_geometric.transforms import RandomJitter - - -def test_random_jitter(): - assert str(RandomJitter(0.1)) == 'RandomJitter(0.1)' - - pos = torch.Tensor([[0, 0], [0, 0], [0, 0], [0, 0]]) - - data = Data(pos=pos) - data = RandomJitter(0)(data) - assert len(data) == 1 - assert data.pos.tolist() == pos.tolist() - - data = Data(pos=pos) - data = RandomJitter(0.1)(data) - assert len(data) == 1 - assert data.pos.min().item() >= -0.1 - assert data.pos.max().item() <= 0.1 - - data = Data(pos=pos) - data = 
RandomJitter([0.1, 1])(data) - assert len(data) == 1 - assert data.pos[:, 0].min().item() >= -0.1 - assert data.pos[:, 0].max().item() <= 0.1 - assert data.pos[:, 1].min().item() >= -1 - assert data.pos[:, 1].max().item() <= 1 diff --git a/pytorch_geometric-2.3.1/test/transforms/test_random_shear.py b/pytorch_geometric-2.3.1/test/transforms/test_random_shear.py deleted file mode 100644 index 33f3f6a..0000000 --- a/pytorch_geometric-2.3.1/test/transforms/test_random_shear.py +++ /dev/null @@ -1,20 +0,0 @@ -import torch - -from torch_geometric.data import Data -from torch_geometric.transforms import RandomShear - - -def test_random_shear(): - assert str(RandomShear(0.1)) == 'RandomShear(0.1)' - - pos = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]]) - - data = Data(pos=pos) - data = RandomShear(0)(data) - assert len(data) == 1 - assert data.pos.tolist() == pos.tolist() - - data = Data(pos=pos) - data = RandomShear(0.1)(data) - assert len(data) == 1 - assert data.pos.tolist() != pos.tolist() diff --git a/pytorch_geometric-2.3.1/test/transforms/test_sample_points.py b/pytorch_geometric-2.3.1/test/transforms/test_sample_points.py deleted file mode 100644 index 4d47171..0000000 --- a/pytorch_geometric-2.3.1/test/transforms/test_sample_points.py +++ /dev/null @@ -1,26 +0,0 @@ -import torch - -from torch_geometric.data import Data -from torch_geometric.transforms import SamplePoints - - -def test_sample_points(): - assert str(SamplePoints(1024)) == 'SamplePoints(1024)' - - pos = torch.Tensor([[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0]]) - face = torch.tensor([[0, 1], [1, 2], [2, 3]]) - - data = Data(pos=pos) - data.face = face - data = SamplePoints(8)(data) - assert len(data) == 1 - assert pos[:, 0].min().item() >= 0 and pos[:, 0].max().item() <= 1 - assert pos[:, 1].min().item() >= 0 and pos[:, 1].max().item() <= 1 - assert pos[:, 2].abs().sum().item() == 0 - - data = Data(pos=pos) - data.face = face - data = SamplePoints(8, include_normals=True)(data) - assert len(data) == 2 - assert data.normal[:, :2].abs().sum().item() == 0 - assert data.normal[:, 2].abs().sum().item() == 8 diff --git a/pytorch_geometric-2.3.1/test/transforms/test_spherical.py b/pytorch_geometric-2.3.1/test/transforms/test_spherical.py deleted file mode 100644 index 78fbd12..0000000 --- a/pytorch_geometric-2.3.1/test/transforms/test_spherical.py +++ /dev/null @@ -1,52 +0,0 @@ -from math import pi as PI - -import torch - -from torch_geometric.data import Data -from torch_geometric.transforms import Spherical - - -def test_spherical(): - assert str(Spherical()) == 'Spherical(norm=True, max_value=None)' - - pos = torch.Tensor([[0, 0, 0], [1, 0, 0]]) - edge_index = torch.tensor([[0, 1], [1, 0]]) - edge_attr = torch.Tensor([1, 1]) - - data = Data(edge_index=edge_index, pos=pos) - data = Spherical(norm=False)(data) - assert len(data) == 3 - assert data.pos.tolist() == pos.tolist() - assert data.edge_index.tolist() == edge_index.tolist() - assert torch.allclose(data.edge_attr, - torch.Tensor([[1, 0, PI / 2], [1, PI, PI / 2]]), - atol=1e-04) - - data = Data(edge_index=edge_index, pos=pos, edge_attr=edge_attr) - data = Spherical(norm=True)(data) - assert len(data) == 3 - assert data.pos.tolist() == pos.tolist() - assert data.edge_index.tolist() == edge_index.tolist() - assert torch.allclose(data.edge_attr, - torch.Tensor([[1, 1, 0, 0.5], [1, 1, 0.5, 0.5]]), - atol=1e-04) - - pos = torch.Tensor([[0, 0, 0], [0, 0, 1]]) - edge_index = torch.tensor([[0, 1], [1, 0]]) - - data = Data(edge_index=edge_index, pos=pos) - data = 
Spherical(norm=False)(data) - assert len(data) == 3 - assert data.pos.tolist() == pos.tolist() - assert data.edge_index.tolist() == edge_index.tolist() - assert torch.allclose(data.edge_attr, - torch.Tensor([[1, 0, 0], [1, 0, PI]]), atol=1e-04) - - data = Data(edge_index=edge_index, pos=pos, edge_attr=edge_attr) - data = Spherical(norm=True)(data) - assert len(data) == 3 - assert data.pos.tolist() == pos.tolist() - assert data.edge_index.tolist() == edge_index.tolist() - assert torch.allclose(data.edge_attr, - torch.Tensor([[1, 1, 0, 0], [1, 1, 0, 1]]), - atol=1e-04) diff --git a/pytorch_geometric-2.3.1/test/utils/test_assortativity.py b/pytorch_geometric-2.3.1/test/utils/test_assortativity.py deleted file mode 100644 index 58b5fdb..0000000 --- a/pytorch_geometric-2.3.1/test/utils/test_assortativity.py +++ /dev/null @@ -1,27 +0,0 @@ -import pytest -import torch -from torch_sparse import SparseTensor - -from torch_geometric.utils import assortativity - - -def test_assortativity(): - # Completely assortative graph: - edge_index = torch.tensor([[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5], - [1, 2, 3, 0, 2, 3, 0, 1, 3, 0, 1, 2, 5, 4]]) - out = assortativity(edge_index) - assert pytest.approx(out, abs=1e-5) == 1.0 - - adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=[6, 6]) - out = assortativity(adj) - assert pytest.approx(out, abs=1e-5) == 1.0 - - # Completely disassortative graph: - edge_index = torch.tensor([[0, 1, 2, 3, 4, 5, 5, 5, 5, 5], - [5, 5, 5, 5, 5, 0, 1, 2, 3, 4]]) - out = assortativity(edge_index) - assert pytest.approx(out, abs=1e-5) == -1.0 - - adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=[6, 6]) - out = assortativity(adj) - assert pytest.approx(out, abs=1e-5) == -1.0 diff --git a/pytorch_geometric-2.3.1/test/utils/test_convert.py b/pytorch_geometric-2.3.1/test/utils/test_convert.py deleted file mode 100644 index a00ae61..0000000 --- a/pytorch_geometric-2.3.1/test/utils/test_convert.py +++ /dev/null @@ -1,483 +0,0 @@ -import pytest -import scipy.sparse -import torch - -from torch_geometric.data import Data -from torch_geometric.testing import withPackage -from torch_geometric.utils import ( - from_cugraph, - from_networkit, - from_networkx, - from_scipy_sparse_matrix, - from_trimesh, - sort_edge_index, - subgraph, - to_cugraph, - to_networkit, - to_networkx, - to_scipy_sparse_matrix, - to_trimesh, -) - - -def test_to_scipy_sparse_matrix(): - edge_index = torch.tensor([[0, 1, 0], [1, 0, 0]]) - - adj = to_scipy_sparse_matrix(edge_index) - assert isinstance(adj, scipy.sparse.coo_matrix) is True - assert adj.shape == (2, 2) - assert adj.row.tolist() == edge_index[0].tolist() - assert adj.col.tolist() == edge_index[1].tolist() - assert adj.data.tolist() == [1, 1, 1] - - edge_attr = torch.Tensor([1, 2, 3]) - adj = to_scipy_sparse_matrix(edge_index, edge_attr) - assert isinstance(adj, scipy.sparse.coo_matrix) is True - assert adj.shape == (2, 2) - assert adj.row.tolist() == edge_index[0].tolist() - assert adj.col.tolist() == edge_index[1].tolist() - assert adj.data.tolist() == edge_attr.tolist() - - -def test_from_scipy_sparse_matrix(): - edge_index = torch.tensor([[0, 1, 0], [1, 0, 0]]) - adj = to_scipy_sparse_matrix(edge_index) - - out = from_scipy_sparse_matrix(adj) - assert out[0].tolist() == edge_index.tolist() - assert out[1].tolist() == [1, 1, 1] - - -@withPackage('networkx') -def test_to_networkx(): - import networkx as nx - - x = torch.Tensor([[1, 2], [3, 4]]) - pos = torch.Tensor([[0, 0], [1, 1]]) - edge_index = torch.tensor([[0, 1, 0], [1, 0, 0]]) - 
edge_attr = torch.Tensor([1, 2, 3]) - data = Data(x=x, pos=pos, edge_index=edge_index, weight=edge_attr) - - for remove_self_loops in [True, False]: - G = to_networkx(data, node_attrs=['x', 'pos'], edge_attrs=['weight'], - remove_self_loops=remove_self_loops) - - assert G.nodes[0]['x'] == [1, 2] - assert G.nodes[1]['x'] == [3, 4] - assert G.nodes[0]['pos'] == [0, 0] - assert G.nodes[1]['pos'] == [1, 1] - - if remove_self_loops: - assert nx.to_numpy_array(G).tolist() == [[0, 1], [2, 0]] - else: - assert nx.to_numpy_array(G).tolist() == [[3, 1], [2, 0]] - - -@withPackage('networkx') -def test_from_networkx_set_node_attributes(): - import networkx as nx - - G = nx.path_graph(3) - attrs = { - 0: { - 'x': torch.tensor([1, 0, 0]) - }, - 1: { - 'x': torch.tensor([0, 1, 0]) - }, - 2: { - 'x': torch.tensor([0, 0, 1]) - }, - } - nx.set_node_attributes(G, attrs) - - assert from_networkx(G).x.tolist() == [[1, 0, 0], [0, 1, 0], [0, 0, 1]] - - -@withPackage('networkx') -def test_to_networkx_undirected(): - import networkx as nx - - x = torch.Tensor([[1, 2], [3, 4]]) - pos = torch.Tensor([[0, 0], [1, 1]]) - edge_index = torch.tensor([[0, 1, 0], [1, 0, 0]]) - edge_attr = torch.Tensor([1, 2, 3]) - data = Data(x=x, pos=pos, edge_index=edge_index, weight=edge_attr) - - for remove_self_loops in [True, False]: - G = to_networkx(data, node_attrs=['x', 'pos'], edge_attrs=['weight'], - remove_self_loops=remove_self_loops, - to_undirected=True) - - assert G.nodes[0]['x'] == [1, 2] - assert G.nodes[1]['x'] == [3, 4] - assert G.nodes[0]['pos'] == [0, 0] - assert G.nodes[1]['pos'] == [1, 1] - - if remove_self_loops: - assert nx.to_numpy_array(G).tolist() == [[0, 1], [1, 0]] - else: - assert nx.to_numpy_array(G).tolist() == [[3, 1], [1, 0]] - - G = to_networkx(data, edge_attrs=['weight'], to_undirected=False) - assert nx.to_numpy_array(G).tolist() == [[3, 1], [2, 0]] - - G = to_networkx(data, edge_attrs=['weight'], to_undirected="upper") - assert nx.to_numpy_array(G).tolist() == [[3, 1], [1, 0]] - - G = to_networkx(data, edge_attrs=['weight'], to_undirected="lower") - assert nx.to_numpy_array(G).tolist() == [[3, 2], [2, 0]] - - -@withPackage('networkx') -def test_from_networkx(): - x = torch.randn(2, 8) - pos = torch.randn(2, 3) - edge_index = torch.tensor([[0, 1, 0], [1, 0, 0]]) - edge_attr = torch.randn(edge_index.size(1)) - perm = torch.tensor([0, 2, 1]) - data = Data(x=x, pos=pos, edge_index=edge_index, edge_attr=edge_attr) - G = to_networkx(data, node_attrs=['x', 'pos'], edge_attrs=['edge_attr']) - data = from_networkx(G) - assert len(data) == 4 - assert data.x.tolist() == x.tolist() - assert data.pos.tolist() == pos.tolist() - assert data.edge_index.tolist() == edge_index[:, perm].tolist() - assert data.edge_attr.tolist() == edge_attr[perm].tolist() - - -@withPackage('networkx') -def test_from_networkx_group_attrs(): - x = torch.randn(2, 2) - x1 = torch.randn(2, 4) - x2 = torch.randn(2, 8) - edge_index = torch.tensor([[0, 1, 0], [1, 0, 0]]) - edge_attr1 = torch.randn(edge_index.size(1)) - edge_attr2 = torch.randn(edge_index.size(1)) - perm = torch.tensor([0, 2, 1]) - data = Data(x=x, x1=x1, x2=x2, edge_index=edge_index, - edge_attr1=edge_attr1, edge_attr2=edge_attr2) - G = to_networkx(data, node_attrs=['x', 'x1', 'x2'], - edge_attrs=['edge_attr1', 'edge_attr2']) - data = from_networkx(G, group_node_attrs=['x', 'x2'], group_edge_attrs=all) - assert len(data) == 4 - assert data.x.tolist() == torch.cat([x, x2], dim=-1).tolist() - assert data.x1.tolist() == x1.tolist() - assert data.edge_index.tolist() == 
edge_index[:, perm].tolist() - assert data.edge_attr.tolist() == torch.stack([edge_attr1, edge_attr2], - dim=-1)[perm].tolist() - - -@withPackage('networkx') -def test_networkx_vice_versa_convert(): - import networkx as nx - - G = nx.complete_graph(5) - assert G.is_directed() is False - data = from_networkx(G) - assert data.is_directed() is False - G = to_networkx(data) - assert G.is_directed() is True - G = nx.to_undirected(G) - assert G.is_directed() is False - - -@withPackage('networkx') -def test_from_networkx_non_consecutive(): - import networkx as nx - - graph = nx.Graph() - graph.add_node(4) - graph.add_node(2) - graph.add_edge(4, 2) - for node in graph.nodes(): - graph.nodes[node]['x'] = node - - data = from_networkx(graph) - assert len(data) == 2 - assert data.x.tolist() == [4, 2] - assert data.edge_index.tolist() == [[0, 1], [1, 0]] - - -@withPackage('networkx') -def test_from_networkx_inverse(): - import networkx as nx - - graph = nx.Graph() - graph.add_node(3) - graph.add_node(2) - graph.add_node(1) - graph.add_node(0) - graph.add_edge(3, 1) - graph.add_edge(2, 1) - graph.add_edge(1, 0) - - data = from_networkx(graph) - assert len(data) == 2 - assert data.edge_index.tolist() == [[0, 1, 2, 2, 2, 3], [2, 2, 0, 1, 3, 2]] - assert data.num_nodes == 4 - - -@withPackage('networkx') -def test_from_networkx_non_numeric_labels(): - import networkx as nx - - graph = nx.Graph() - graph.add_node('4') - graph.add_node('2') - graph.add_edge('4', '2') - for node in graph.nodes(): - graph.nodes[node]['x'] = node - data = from_networkx(graph) - assert len(data) == 2 - assert data.x == ['4', '2'] - assert data.edge_index.tolist() == [[0, 1], [1, 0]] - - -@withPackage('networkx') -def test_from_networkx_without_edges(): - import networkx as nx - - graph = nx.Graph() - graph.add_node(1) - graph.add_node(2) - data = from_networkx(graph) - assert len(data) == 2 - assert data.edge_index.size() == (2, 0) - assert data.num_nodes == 2 - - -@withPackage('networkx') -def test_from_networkx_with_same_node_and_edge_attributes(): - import networkx as nx - - G = nx.Graph() - G.add_nodes_from([(0, {'age': 1}), (1, {'age': 6}), (2, {'age': 5})]) - G.add_edges_from([(0, 1, {'age': 2}), (1, 2, {'age': 7})]) - - data = from_networkx(G) - assert len(data) == 4 - assert data.age.tolist() == [1, 6, 5] - assert data.num_nodes == 3 - assert data.edge_index.tolist() == [[0, 1, 1, 2], [1, 0, 2, 1]] - assert data.edge_age.tolist() == [2, 2, 7, 7] - - data = from_networkx(G, group_node_attrs=all, group_edge_attrs=all) - assert len(data) == 3 - assert data.x.tolist() == [[1], [6], [5]] - assert data.edge_index.tolist() == [[0, 1, 1, 2], [1, 0, 2, 1]] - assert data.edge_attr.tolist() == [[2], [2], [7], [7]] - - -@withPackage('networkx') -def test_from_networkx_subgraph_convert(): - import networkx as nx - - G = nx.complete_graph(5) - - edge_index = from_networkx(G).edge_index - sub_edge_index_1, _ = subgraph([0, 1, 3, 4], edge_index, - relabel_nodes=True) - - sub_edge_index_2 = from_networkx(G.subgraph([0, 1, 3, 4])).edge_index - - assert sub_edge_index_1.tolist() == sub_edge_index_2.tolist() - - -@withPackage('networkit') -def test_to_networkit_vice_versa(): - edge_index = torch.tensor([[0, 1], [1, 0]]) - - g = to_networkit(edge_index, directed=False) - assert not g.isDirected() - assert not g.isWeighted() - - edge_index, edge_weight = from_networkit(g) - assert edge_index.tolist() == [[0, 1], [1, 0]] - assert edge_weight is None - - -@withPackage('networkit') -@pytest.mark.parametrize('directed', [True, False]) 
-@pytest.mark.parametrize('num_nodes', [None, 3]) -@pytest.mark.parametrize('edge_weight', [None, torch.rand(3)]) -def test_to_networkit(directed, edge_weight, num_nodes): - import networkit - - edge_index = torch.tensor([[0, 1, 1], [1, 0, 2]], dtype=torch.long) - g = to_networkit(edge_index, edge_weight, num_nodes, directed) - - assert isinstance(g, networkit.Graph) - assert g.isDirected() == directed - assert g.numberOfNodes() == 3 - - if edge_weight is None: - edge_weight = torch.tensor([1., 1., 1.]) - - assert g.weight(0, 1) == float(edge_weight[0]) - assert g.weight(1, 2) == float(edge_weight[2]) - - if directed: - assert g.numberOfEdges() == 3 - assert g.weight(1, 0) == float(edge_weight[1]) - else: - assert g.numberOfEdges() == 2 - - -@pytest.mark.parametrize('directed', [True, False]) -@pytest.mark.parametrize('weighted', [True, False]) -@withPackage('networkit') -def test_from_networkit(directed, weighted): - import networkit - - g = networkit.Graph(3, weighted=weighted, directed=directed) - g.addEdge(0, 1) - g.addEdge(1, 2) - if directed: - g.addEdge(1, 0) - - if weighted: - for i, (u, v) in enumerate(g.iterEdges()): - g.setWeight(u, v, i + 1) - - edge_index, edge_weight = from_networkit(g) - - if directed: - assert edge_index.tolist() == [[0, 1, 1], [1, 2, 0]] - if weighted: - assert edge_weight.tolist() == [1, 2, 3] - else: - assert edge_weight is None - else: - assert edge_index.tolist() == [[0, 1, 1, 2], [1, 0, 2, 1]] - if weighted: - assert edge_weight.tolist() == [1, 1, 2, 2] - else: - assert edge_weight is None - - -@withPackage('trimesh') -def test_trimesh_vice_versa(): - pos = torch.tensor([[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0]], - dtype=torch.float) - face = torch.tensor([[0, 1, 2], [1, 2, 3]]).t() - - data = Data(pos=pos, face=face) - mesh = to_trimesh(data) - data = from_trimesh(mesh) - - assert pos.tolist() == data.pos.tolist() - assert face.tolist() == data.face.tolist() - - -@withPackage('trimesh') -def test_to_trimesh(): - import trimesh - - pos = torch.tensor([[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 0]]) - face = torch.tensor([[0, 1, 2], [2, 1, 3]]).t() - data = Data(pos=pos, face=face) - - obj = to_trimesh(data) - - assert isinstance(obj, trimesh.Trimesh) - assert obj.vertices.shape == (4, 3) - assert obj.faces.shape == (2, 3) - assert obj.vertices.tolist() == data.pos.tolist() - assert obj.faces.tolist() == data.face.t().contiguous().tolist() - - -@withPackage('trimesh') -def test_from_trimesh(): - import trimesh - - vertices = [[0, 0, 0], [1, 0, 0], [0, 1, 0]] - faces = [[0, 1, 2]] - mesh = trimesh.Trimesh(vertices=vertices, faces=faces, process=False) - - data = from_trimesh(mesh) - - assert data.pos.tolist() == vertices - assert data.face.t().contiguous().tolist() == faces - - -@withPackage('cudf') -@withPackage('cugraph') -@pytest.mark.parametrize('edge_weight', [None, torch.rand(4)]) -@pytest.mark.parametrize('relabel_nodes', [True, False]) -@pytest.mark.parametrize('directed', [True, False]) -def test_to_cugraph(edge_weight, directed, relabel_nodes): - import cugraph - - if directed: - edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) - else: - edge_index = torch.tensor([[0, 1], [1, 2]]) - - if edge_weight is not None: - edge_weight = edge_weight[:edge_index.size(1)] - - graph = to_cugraph(edge_index, edge_weight, relabel_nodes, directed) - assert isinstance(graph, cugraph.Graph) - assert graph.number_of_nodes() == 3 - - edge_list = graph.view_edge_list() - assert edge_list is not None - - edge_list = edge_list.sort_values(by=['src', 'dst']) - 
- cu_edge_index = edge_list[['src', 'dst']].to_pandas().values - cu_edge_index = torch.from_numpy(cu_edge_index).t() - cu_edge_weight = None - if edge_weight is not None: - cu_edge_weight = edge_list['weights'].to_pandas().values - cu_edge_weight = torch.from_numpy(cu_edge_weight) - - cu_edge_index, cu_edge_weight = sort_edge_index(cu_edge_index, - cu_edge_weight) - - assert torch.equal(edge_index, cu_edge_index.cpu()) - if edge_weight is not None: - assert torch.allclose(edge_weight, cu_edge_weight.cpu()) - - -@withPackage('cudf') -@withPackage('cugraph') -@pytest.mark.parametrize('edge_weight', [None, torch.randn(4)]) -@pytest.mark.parametrize('directed', [True, False]) -@pytest.mark.parametrize('relabel_nodes', [True, False]) -def test_from_cugraph(edge_weight, directed, relabel_nodes): - import cudf - import cugraph - from torch.utils.dlpack import to_dlpack - - if directed: - edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]]) - else: - edge_index = torch.tensor([[0, 1], [1, 2]]) - - if edge_weight is not None: - edge_weight = edge_weight[:edge_index.size(1)] - - G = cugraph.Graph(directed=directed) - df = cudf.from_dlpack(to_dlpack(edge_index.t())) - if edge_weight is not None: - df['2'] = cudf.from_dlpack(to_dlpack(edge_weight)) - - G.from_cudf_edgelist( - df, - source=0, - destination=1, - edge_attr='2' if edge_weight is not None else None, - renumber=relabel_nodes, - ) - - cu_edge_index, cu_edge_weight = from_cugraph(G) - cu_edge_index, cu_edge_weight = sort_edge_index(cu_edge_index, - cu_edge_weight) - - assert torch.equal(edge_index, cu_edge_index.cpu()) - if edge_weight is not None: - assert torch.allclose(edge_weight, cu_edge_weight.cpu()) - else: - assert cu_edge_weight is None diff --git a/pytorch_geometric-2.3.1/test/utils/test_geodesic.py b/pytorch_geometric-2.3.1/test/utils/test_geodesic.py deleted file mode 100644 index 1a53374..0000000 --- a/pytorch_geometric-2.3.1/test/utils/test_geodesic.py +++ /dev/null @@ -1,43 +0,0 @@ -from math import sqrt - -import pytest -import torch - -from torch_geometric.testing import withPackage -from torch_geometric.utils import geodesic_distance - - -@withPackage('gdist') -@pytest.mark.skip(reason="No way of currently testing this") -def test_geodesic_distance(): - pos = torch.Tensor([[0, 0, 0], [2, 0, 0], [0, 2, 0], [2, 2, 0]]) - face = torch.tensor([[0, 1, 3], [0, 2, 3]]).t() - - out = geodesic_distance(pos, face) - expected = [ - [0, 1, 1, sqrt(2)], - [1, 0, sqrt(2), 1], - [1, sqrt(2), 0, 1], - [sqrt(2), 1, 1, 0], - ] - assert torch.allclose(out, torch.tensor(expected)) - assert torch.allclose(out, geodesic_distance(pos, face, num_workers=-1)) - - out = geodesic_distance(pos, face, norm=False) - expected = [ - [0, 2, 2, 2 * sqrt(2)], - [2, 0, 2 * sqrt(2), 2], - [2, 2 * sqrt(2), 0, 2], - [2 * sqrt(2), 2, 2, 0], - ] - assert torch.allclose(out, torch.tensor(expected)) - - src = torch.tensor([0, 0, 0, 0]) - dest = torch.tensor([0, 1, 2, 3]) - out = geodesic_distance(pos, face, src=src, dest=dest) - expected = [0, 1, 1, sqrt(2)] - assert torch.allclose(out, torch.tensor(expected)) - - out = geodesic_distance(pos, face, dest=dest) - expected = [0, 0, 0, 0] - assert torch.allclose(out, torch.Tensor(expected)) diff --git a/pytorch_geometric-2.3.1/test/utils/test_homophily.py b/pytorch_geometric-2.3.1/test/utils/test_homophily.py deleted file mode 100644 index 7746d08..0000000 --- a/pytorch_geometric-2.3.1/test/utils/test_homophily.py +++ /dev/null @@ -1,28 +0,0 @@ -import pytest -import torch -from torch_sparse import SparseTensor - 
-from torch_geometric.utils import homophily - - -def test_homophily(): - edge_index = torch.tensor([[0, 1, 2, 3], [1, 2, 0, 4]]) - y = torch.tensor([0, 0, 0, 0, 1]) - batch = torch.tensor([0, 0, 0, 1, 1]) - row, col = edge_index - adj = SparseTensor(row=row, col=col, sparse_sizes=(5, 5)) - - method = 'edge' - assert pytest.approx(homophily(edge_index, y, method=method)) == 0.75 - assert pytest.approx(homophily(adj, y, method=method)) == 0.75 - assert homophily(edge_index, y, batch, method).tolist() == [1., 0.] - - method = 'node' - assert pytest.approx(homophily(edge_index, y, method=method)) == 0.6 - assert pytest.approx(homophily(adj, y, method=method)) == 0.6 - assert homophily(edge_index, y, batch, method).tolist() == [1., 0.] - - method = 'edge_insensitive' - assert pytest.approx(homophily(edge_index, y, method=method)) == 0.1999999 - assert pytest.approx(homophily(adj, y, method=method)) == 0.1999999 - assert homophily(edge_index, y, batch, method).tolist() == [0., 0.] diff --git a/pytorch_geometric-2.3.1/test/utils/test_normalized_cut.py b/pytorch_geometric-2.3.1/test/utils/test_normalized_cut.py deleted file mode 100644 index 8b4359f..0000000 --- a/pytorch_geometric-2.3.1/test/utils/test_normalized_cut.py +++ /dev/null @@ -1,19 +0,0 @@ -import torch - -from torch_geometric.testing import is_full_test -from torch_geometric.utils import normalized_cut - - -def test_normalized_cut(): - row = torch.tensor([0, 1, 1, 1, 2, 2, 3, 3, 4, 4]) - col = torch.tensor([1, 0, 2, 3, 1, 4, 1, 4, 2, 3]) - edge_attr = torch.Tensor([3, 3, 6, 3, 6, 1, 3, 2, 1, 2]) - expected_output = [4, 4, 5, 2.5, 5, 1, 2.5, 2, 1, 2] - - output = normalized_cut(torch.stack([row, col], dim=0), edge_attr) - assert output.tolist() == expected_output - - if is_full_test(): - jit = torch.jit.script(normalized_cut) - output = jit(torch.stack([row, col], dim=0), edge_attr) - assert output.tolist() == expected_output
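The `expected_output` in the deleted `test_normalized_cut.py` above can be reproduced by hand: each edge weight is scaled by the sum of the inverse degrees of its endpoints, with degrees taken from the `col` vector (this is what the expected numbers imply; a sketch, not the library source):

```python
import torch

from torch_geometric.utils import degree

row = torch.tensor([0, 1, 1, 1, 2, 2, 3, 3, 4, 4])
col = torch.tensor([1, 0, 2, 3, 1, 4, 1, 4, 2, 3])
edge_attr = torch.Tensor([3, 3, 6, 3, 6, 1, 3, 2, 1, 2])

# cut(u, v) = w(u, v) * (1 / deg(u) + 1 / deg(v))
deg = degree(col, num_nodes=5, dtype=edge_attr.dtype)
cut = edge_attr * (1.0 / deg[row] + 1.0 / deg[col])

# First edge (0, 1): 3 * (1/1 + 1/3) == 4.0, and so on down the list:
expected = torch.tensor([4.0, 4.0, 5.0, 2.5, 5.0, 1.0, 2.5, 2.0, 1.0, 2.0])
assert torch.allclose(cut, expected)
```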
diff --git a/pytorch_geometric-2.3.1/test/utils/test_scatter.py b/pytorch_geometric-2.3.1/test/utils/test_scatter.py deleted file mode 100644 index b5b6087..0000000 --- a/pytorch_geometric-2.3.1/test/utils/test_scatter.py +++ /dev/null @@ -1,120 +0,0 @@ -import pytest -import torch - -from torch_geometric.profile import benchmark -from torch_geometric.testing import withCUDA, withPackage -from torch_geometric.utils import scatter - - -def test_scatter_validate(): - src = torch.randn(100, 32) - index = torch.randint(0, 10, (100, ), dtype=torch.long) - - with pytest.raises(ValueError, match="must be one-dimensional"): - scatter(src, index.view(-1, 1)) - - with pytest.raises(ValueError, match="must lay between 0 and 1"): - scatter(src, index, dim=2) - - with pytest.raises(ValueError, match="invalid `reduce` argument 'std'"): - scatter(src, index, reduce='std') - - -@withCUDA -@withPackage('torch_scatter') -@pytest.mark.parametrize('reduce', ['sum', 'add', 'mean', 'min', 'max']) -def test_scatter(reduce, device): - import torch_scatter - - src = torch.randn(100, 16, device=device) - index = torch.randint(0, 8, (100, ), device=device) - - out1 = scatter(src, index, dim=0, reduce=reduce) - out2 = torch_scatter.scatter(src, index, dim=0, reduce=reduce) - assert out1.device == device - assert torch.allclose(out1, out2, atol=1e-6) - - jit = torch.jit.script(scatter) - out3 = jit(src, index, dim=0, reduce=reduce) - assert torch.allclose(out1, out3, atol=1e-6) - - src = torch.randn(8, 100, 16, device=device) - out1 = scatter(src, index, dim=1, reduce=reduce) - out2 = torch_scatter.scatter(src, index, dim=1, reduce=reduce) - assert out1.device == device - assert torch.allclose(out1, out2, atol=1e-6) - - -@withCUDA -@pytest.mark.parametrize('reduce', ['sum', 'add', 'mean', 'min', 'max']) -def test_scatter_backward(reduce, device): - src = torch.randn(8, 100, 16, device=device, requires_grad=True) - index = torch.randint(0, 8, (100, ), device=device) - - out = scatter(src, index, dim=1, reduce=reduce) - - assert src.grad is None - out.mean().backward() - assert src.grad is not None - - -if __name__ == '__main__': - # Insights on GPU: - # ================ - # * "sum": Prefer `scatter_add_` implementation - # * "mean": Prefer manual implementation via `scatter_add_` + `count` - # * "min"/"max": - # * Prefer `scatter_reduce_` implementation without gradients - # * Prefer `torch_sparse` implementation with gradients - # * "mul": Prefer `torch_sparse` implementation - # - # Insights on CPU: - # ================ - # * "sum": Prefer `scatter_add_` implementation - # * "mean": Prefer manual implementation via `scatter_add_` + `count` - # * "min"/"max": Prefer `scatter_reduce_` implementation - # * "mul" (probably not worth branching for this): - # * Prefer `scatter_reduce_` implementation without gradients - # * Prefer `torch_sparse` implementation with gradients - import argparse - - import torch_scatter - - parser = argparse.ArgumentParser() - parser.add_argument('--device', type=str, default='cuda') - parser.add_argument('--backward', action='store_true') - args = parser.parse_args() - - num_nodes, num_edges = 1_000, 50_000 - x = torch.randn(num_edges, 64, device=args.device) - index = torch.randint(num_nodes, (num_edges, ), device=args.device) - - def pytorch_scatter(x, index, dim_size, reduce): - if reduce == 'min' or reduce == 'max': - reduce = f'a{reduce}' # `amin` or `amax` - elif reduce == 'mul': - reduce = 'prod' - out = x.new_zeros((dim_size, x.size(-1))) - include_self = reduce in ['sum', 'mean'] - index = index.view(-1, 1).expand(-1, x.size(-1)) - out.scatter_reduce_(0, index, x, reduce, include_self=include_self) - return out - - def own_scatter(x, index, dim_size, reduce): - return torch_scatter.scatter(x, index, dim=0, dim_size=dim_size, - reduce=reduce) - - def optimized_scatter(x, index, dim_size, reduce): - return scatter(x, index, dim=0, dim_size=dim_size, reduce=reduce) - - aggrs = ['sum', 'mean', 'min', 'max', 'mul'] - for aggr in aggrs: - print(f'Aggregator: {aggr}') - benchmark( - funcs=[pytorch_scatter, own_scatter, optimized_scatter], - func_names=['PyTorch', 'torch_scatter', 'Optimized'], - args=(x, index, num_nodes, aggr), - num_steps=100 if args.device == 'cpu' else 1000, - num_warmups=50 if args.device == 'cpu' else 500, - backward=args.backward, - )
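The `__main__` benchmark above refers to a "manual implementation via `scatter_add_` + `count`" for the `'mean'` reduction. A rough sketch of that trick (an illustration of the stated insight, not PyG's actual code path):

```python
import torch

def scatter_mean(src, index, dim_size):
    # Sum rows of `src` into `dim_size` buckets along dim 0 ...
    out = src.new_zeros(dim_size, src.size(-1))
    out.scatter_add_(0, index.view(-1, 1).expand(-1, src.size(-1)), src)
    # ... then divide by the per-bucket counts (clamped against empty
    # buckets to avoid division by zero).
    count = torch.bincount(index, minlength=dim_size).clamp_(min=1)
    return out / count.view(-1, 1).to(src.dtype)

torch.manual_seed(0)
src = torch.randn(100, 16)
index = torch.randint(0, 8, (100, ))
expected = torch.stack([src[index == i].mean(dim=0) for i in range(8)])
assert torch.allclose(scatter_mean(src, index, 8), expected, atol=1e-6)
```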
diff --git a/pytorch_geometric-2.3.1/test/utils/test_softmax.py b/pytorch_geometric-2.3.1/test/utils/test_softmax.py deleted file mode 100644 index a84ce67..0000000 --- a/pytorch_geometric-2.3.1/test/utils/test_softmax.py +++ /dev/null @@ -1,89 +0,0 @@ -import torch - -import torch_geometric -from torch_geometric.profile import benchmark -from torch_geometric.utils import softmax - - -def test_softmax(): - src = torch.tensor([1., 1., 1., 1.]) - index = torch.tensor([0, 0, 1, 2]) - ptr = torch.tensor([0, 2, 3, 4]) - - out = softmax(src, index) - assert out.tolist() == [0.5, 0.5, 1, 1] - assert softmax(src, None, ptr).tolist() == out.tolist() - - src = src.view(-1, 1) - out = softmax(src, index) - assert out.tolist() == [[0.5], [0.5], [1], [1]] - assert softmax(src, None, ptr).tolist() == out.tolist() - - jit = torch.jit.script(softmax) - assert torch.allclose(jit(src, index), out) - - -def test_softmax_backward(): - src_sparse = torch.rand(4, 8) - index = torch.tensor([0, 0, 1, 1]) - src_dense = src_sparse.clone().view(2, 2, src_sparse.size(-1)) - - src_sparse.requires_grad_(True) - src_dense.requires_grad_(True) - - out_sparse = softmax(src_sparse, index) - out_sparse.mean().backward() - out_dense = src_dense.softmax(dim=1) - out_dense.mean().backward() - - assert torch.allclose(out_sparse, out_dense.view_as(out_sparse)) - assert torch.allclose(src_sparse.grad, src_dense.grad.view_as(src_sparse)) - - -def test_softmax_dim(): - index = torch.tensor([0, 0, 0, 0]) - ptr = torch.tensor([0, 4]) - - src = torch.randn(4) - assert torch.allclose(softmax(src, index, dim=0), src.softmax(dim=0)) - assert torch.allclose(softmax(src, ptr=ptr, dim=0), src.softmax(dim=0)) - - src = torch.randn(4, 16) - assert torch.allclose(softmax(src, index, dim=0), src.softmax(dim=0)) - assert torch.allclose(softmax(src, ptr=ptr, dim=0), src.softmax(dim=0)) - - src = torch.randn(4, 4) - assert torch.allclose(softmax(src, index, dim=-1), src.softmax(dim=-1)) - assert torch.allclose(softmax(src, ptr=ptr, dim=-1), src.softmax(dim=-1)) - - src = torch.randn(4, 4, 16) - assert torch.allclose(softmax(src, index, dim=1), src.softmax(dim=1)) - assert torch.allclose(softmax(src, ptr=ptr, dim=1), src.softmax(dim=1)) - - -if __name__ == '__main__': - import argparse - - parser = argparse.ArgumentParser() - parser.add_argument('--device', type=str, default='cuda') - parser.add_argument('--backward', action='store_true') - args = parser.parse_args() - - num_nodes, num_edges = 10_000, 200_000 - x = torch.randn(num_edges, 64, device=args.device) - index = torch.randint(num_nodes, (num_edges, ), device=args.device) - - compiled_softmax = torch_geometric.compile(softmax) - - def dense_softmax(x, index): - x = x.view(num_nodes, -1, x.size(-1)) - return x.softmax(dim=-1) - - benchmark( - funcs=[dense_softmax, softmax, compiled_softmax], - func_names=['Dense Softmax', 'Vanilla', 'Compiled'], - args=(x, index), - num_steps=50 if args.device == 'cpu' else 500, - num_warmups=10 if args.device == 'cpu' else 100, - backward=args.backward, - ) diff --git a/pytorch_geometric-2.3.1/test/utils/test_sparse.py b/pytorch_geometric-2.3.1/test/utils/test_sparse.py deleted file mode 100644 index 7814846..0000000 --- a/pytorch_geometric-2.3.1/test/utils/test_sparse.py +++ /dev/null @@ -1,193 +0,0 @@ -import torch -from torch_sparse import SparseTensor - -import torch_geometric.typing -from torch_geometric.testing import is_full_test -from torch_geometric.utils import ( - dense_to_sparse, - is_sparse, - is_torch_sparse_tensor, - to_edge_index, - to_torch_coo_tensor, - to_torch_csc_tensor, - to_torch_csr_tensor, - ) - - -def test_dense_to_sparse(): - adj = torch.Tensor([ - [3, 1], - [2, 0], - ]) - edge_index, edge_attr = dense_to_sparse(adj) - assert edge_index.tolist() == [[0, 0, 1], [0, 1, 0]] - assert edge_attr.tolist() == [3, 1, 2] - - if is_full_test(): - jit = torch.jit.script(dense_to_sparse) - edge_index, edge_attr = jit(adj) - assert edge_index.tolist() == [[0, 0, 1], [0, 1, 0]] - assert edge_attr.tolist() == [3, 1, 2] - - adj = torch.Tensor([[ - [3, 1], - [2, 0], - ], [ - [0, 1], - [0, 2], - ]]) - edge_index, edge_attr = dense_to_sparse(adj) - assert edge_index.tolist() == [[0, 0, 1, 2, 3], [0, 1, 0, 3, 3]] - assert edge_attr.tolist() == [3, 1, 2, 1, 2] - - if is_full_test(): - jit = torch.jit.script(dense_to_sparse) - edge_index, edge_attr = jit(adj) - assert edge_index.tolist() == 
[[0, 0, 1, 2, 3], [0, 1, 0, 3, 3]] - assert edge_attr.tolist() == [3, 1, 2, 1, 2] - - -def test_dense_to_sparse_bipartite(): - edge_index, edge_attr = dense_to_sparse(torch.rand(2, 10, 5)) - assert edge_index[0].max() == 19 - assert edge_index[1].max() == 9 - - -def test_is_torch_sparse_tensor(): - x = torch.randn(5, 5) - - assert not is_torch_sparse_tensor(x) - assert not is_torch_sparse_tensor(SparseTensor.from_dense(x)) - assert is_torch_sparse_tensor(x.to_sparse()) - - -def test_is_sparse(): - x = torch.randn(5, 5) - - assert not is_sparse(x) - assert is_sparse(SparseTensor.from_dense(x)) - assert is_sparse(x.to_sparse()) - - -def test_to_torch_coo_tensor(): - edge_index = torch.tensor([ - [0, 1, 1, 2, 2, 3], - [1, 0, 2, 1, 3, 2], - ]) - edge_attr = torch.randn(edge_index.size(1), 8) - - adj = to_torch_coo_tensor(edge_index, is_coalesced=False) - assert adj.is_coalesced() - assert adj.size() == (4, 4) - assert adj.layout == torch.sparse_coo - assert torch.allclose(adj.indices(), edge_index) - - adj = to_torch_coo_tensor(edge_index, is_coalesced=True) - assert adj.is_coalesced() - assert adj.size() == (4, 4) - assert adj.layout == torch.sparse_coo - assert torch.allclose(adj.indices(), edge_index) - - adj = to_torch_coo_tensor(edge_index, size=6) - assert adj.size() == (6, 6) - assert adj.layout == torch.sparse_coo - assert torch.allclose(adj.indices(), edge_index) - - adj = to_torch_coo_tensor(edge_index, edge_attr) - assert adj.size() == (4, 4, 8) - assert adj.layout == torch.sparse_coo - assert torch.allclose(adj.indices(), edge_index) - assert torch.allclose(adj.values(), edge_attr) - - if is_full_test(): - jit = torch.jit.script(to_torch_coo_tensor) - adj = jit(edge_index, edge_attr) - assert adj.size() == (4, 4, 8) - assert adj.layout == torch.sparse_coo - assert torch.allclose(adj.indices(), edge_index) - assert torch.allclose(adj.values(), edge_attr) - - -def test_to_torch_csr_tensor(): - edge_index = torch.tensor([ - [0, 1, 1, 2, 2, 3], - [1, 0, 2, 1, 3, 2], - ]) - - adj = to_torch_csr_tensor(edge_index) - assert adj.size() == (4, 4) - assert adj.layout == torch.sparse_csr - assert torch.allclose(adj.to_sparse_coo().indices(), edge_index) - - edge_weight = torch.randn(edge_index.size(1)) - adj = to_torch_csr_tensor(edge_index, edge_weight) - assert adj.size() == (4, 4) - assert adj.layout == torch.sparse_csr - assert torch.allclose(adj.to_sparse_coo().indices(), edge_index) - assert torch.allclose(adj.to_sparse_coo().values(), edge_weight) - - if torch_geometric.typing.WITH_PT2: - edge_attr = torch.randn(edge_index.size(1), 8) - adj = to_torch_csr_tensor(edge_index, edge_attr) - assert adj.size() == (4, 4, 8) - assert adj.layout == torch.sparse_csr - assert torch.allclose(adj.to_sparse_coo().indices(), edge_index) - assert torch.allclose(adj.to_sparse_coo().values(), edge_attr) - - -def test_to_torch_csc_tensor(): - edge_index = torch.tensor([ - [0, 1, 1, 2, 2, 3], - [1, 0, 2, 1, 3, 2], - ]) - - adj = to_torch_csc_tensor(edge_index) - assert adj.size() == (4, 4) - assert adj.layout == torch.sparse_csc - adj_coo = adj.to_sparse_coo().coalesce() - if torch_geometric.typing.WITH_PT2: - assert torch.allclose(adj_coo.indices(), edge_index) - else: - assert torch.allclose(adj_coo.indices().flip([0]), edge_index) - - edge_weight = torch.randn(edge_index.size(1)) - adj = to_torch_csc_tensor(edge_index, edge_weight) - assert adj.size() == (4, 4) - assert adj.layout == torch.sparse_csc - adj_coo = adj.to_sparse_coo().coalesce() - if torch_geometric.typing.WITH_PT2: - assert 
torch.allclose(adj_coo.indices(), edge_index) - assert torch.allclose(adj_coo.values(), edge_weight) - else: - perm = adj_coo.indices()[0].argsort() - assert torch.allclose(adj_coo.indices()[:, perm], edge_index) - assert torch.allclose(adj_coo.values()[perm], edge_weight) - - if torch_geometric.typing.WITH_PT2: - edge_attr = torch.randn(edge_index.size(1), 8) - adj = to_torch_csc_tensor(edge_index, edge_attr) - assert adj.size() == (4, 4, 8) - assert adj.layout == torch.sparse_csc - assert torch.allclose(adj.to_sparse_coo().coalesce().indices(), - edge_index) - assert torch.allclose(adj.to_sparse_coo().coalesce().values(), - edge_attr) - - -def test_to_edge_index(): - adj = torch.tensor([ - [0., 1., 0., 0.], - [1., 0., 1., 0.], - [0., 1., 0., 1.], - [0., 0., 1., 0.], - ]).to_sparse() - - edge_index, edge_attr = to_edge_index(adj) - assert edge_index.tolist() == [[0, 1, 1, 2, 2, 3], [1, 0, 2, 1, 3, 2]] - assert edge_attr.tolist() == [1., 1., 1., 1., 1., 1.] - - if is_full_test(): - jit = torch.jit.script(to_edge_index) - edge_index, edge_attr = jit(adj) - assert edge_index.tolist() == [[0, 1, 1, 2, 2, 3], [1, 0, 2, 1, 3, 2]] - assert edge_attr.tolist() == [1., 1., 1., 1., 1., 1.] diff --git a/pytorch_geometric-2.3.1/test/utils/test_subgraph.py b/pytorch_geometric-2.3.1/test/utils/test_subgraph.py deleted file mode 100644 index 92a905c..0000000 --- a/pytorch_geometric-2.3.1/test/utils/test_subgraph.py +++ /dev/null @@ -1,99 +0,0 @@ -import torch - -from torch_geometric.nn import GCNConv, Linear -from torch_geometric.utils import ( - bipartite_subgraph, - get_num_hops, - index_to_mask, - k_hop_subgraph, - subgraph, -) - - -def test_get_num_hops(): - class GNN(torch.nn.Module): - def __init__(self): - super().__init__() - self.conv1 = GCNConv(3, 16, normalize=False) - self.conv2 = GCNConv(16, 16, normalize=False) - self.lin = Linear(16, 2) - - def forward(self, x, edge_index): - x = torch.relu(self.conv1(x, edge_index)) - x = self.conv2(x, edge_index) - return self.lin(x) - - assert get_num_hops(GNN()) == 2 - - -def test_subgraph(): - edge_index = torch.tensor([ - [0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6], - [1, 0, 2, 1, 3, 2, 4, 3, 5, 4, 6, 5], - ]) - edge_attr = torch.Tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]) - - idx = torch.tensor([3, 4, 5], dtype=torch.long) - mask = index_to_mask(idx, 7) - indices = idx.tolist() - - for subset in [idx, mask, indices]: - out = subgraph(subset, edge_index, edge_attr, return_edge_mask=True) - assert out[0].tolist() == [[3, 4, 4, 5], [4, 3, 5, 4]] - assert out[1].tolist() == [7, 8, 9, 10] - assert out[2].tolist() == [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0] - - out = subgraph(subset, edge_index, edge_attr, relabel_nodes=True) - assert out[0].tolist() == [[0, 1, 1, 2], [1, 0, 2, 1]] - assert out[1].tolist() == [7, 8, 9, 10]
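The `relabel_nodes=True` behaviour asserted in `test_subgraph` above amounts to keeping the edges whose endpoints both survive and compacting the surviving node ids. A self-contained sketch with a hypothetical `relabel_subgraph` helper (not the library function):

```python
import torch

def relabel_subgraph(subset, edge_index):
    # Keep only edges whose endpoints are both contained in `subset` ...
    num_nodes = int(edge_index.max()) + 1
    node_mask = torch.zeros(num_nodes, dtype=torch.bool)
    node_mask[subset] = True
    edge_mask = node_mask[edge_index[0]] & node_mask[edge_index[1]]
    # ... then map the surviving node ids onto 0..len(subset)-1.
    relabel = torch.full((num_nodes, ), -1, dtype=torch.long)
    relabel[subset] = torch.arange(subset.numel())
    return relabel[edge_index[:, edge_mask]], edge_mask

edge_index = torch.tensor([
    [0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6],
    [1, 0, 2, 1, 3, 2, 4, 3, 5, 4, 6, 5],
])
out, edge_mask = relabel_subgraph(torch.tensor([3, 4, 5]), edge_index)
assert out.tolist() == [[0, 1, 1, 2], [1, 0, 2, 1]]  # matches the test above
```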
- - -def test_bipartite_subgraph(): - edge_index = torch.tensor([[0, 5, 2, 3, 3, 4, 4, 3, 5, 5, 6], - [0, 0, 3, 2, 0, 0, 2, 1, 2, 3, 1]]) - edge_attr = torch.Tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) - idx = (torch.tensor([2, 3, 5], dtype=torch.long), - torch.tensor([2, 3], dtype=torch.long)) - mask = (index_to_mask(idx[0], 7), index_to_mask(idx[1], 4)) - indices = (idx[0].tolist(), idx[1].tolist()) - mixed = (mask[0], idx[1]) - - for subset in [idx, mask, indices, mixed]: - out = bipartite_subgraph(subset, edge_index, edge_attr, - return_edge_mask=True) - assert out[0].tolist() == [[2, 3, 5, 5], [3, 2, 2, 3]] - assert out[1].tolist() == [3, 4, 9, 10] - assert out[2].tolist() == [0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0] - - out = bipartite_subgraph(subset, edge_index, edge_attr, - relabel_nodes=True) - assert out[0].tolist() == [[0, 1, 2, 2], [1, 0, 0, 1]] - assert out[1].tolist() == [3, 4, 9, 10] - - -def test_k_hop_subgraph(): - edge_index = torch.tensor([ - [0, 1, 2, 3, 4, 5], - [2, 2, 4, 4, 6, 6], - ]) - - subset, edge_index, mapping, edge_mask = k_hop_subgraph( - 6, 2, edge_index, relabel_nodes=True) - assert subset.tolist() == [2, 3, 4, 5, 6] - assert edge_index.tolist() == [[0, 1, 2, 3], [2, 2, 4, 4]] - assert mapping.tolist() == [4] - assert edge_mask.tolist() == [False, False, True, True, True, True] - - edge_index = torch.tensor([ - [1, 2, 4, 5], - [0, 1, 5, 6], - ]) - - subset, edge_index, mapping, edge_mask = k_hop_subgraph([0, 6], 2, - edge_index, - relabel_nodes=True) - - assert subset.tolist() == [0, 1, 2, 4, 5, 6] - assert edge_index.tolist() == [[1, 2, 3, 4], [0, 1, 4, 5]] - assert mapping.tolist() == [0, 5] - assert edge_mask.tolist() == [True, True, True, True] diff --git a/pytorch_geometric-2.3.1/test/utils/test_to_dense_batch.py b/pytorch_geometric-2.3.1/test/utils/test_to_dense_batch.py deleted file mode 100644 index 9585428..0000000 --- a/pytorch_geometric-2.3.1/test/utils/test_to_dense_batch.py +++ /dev/null @@ -1,54 +0,0 @@ -import torch - -from torch_geometric.testing import is_full_test -from torch_geometric.utils import to_dense_batch - - -def test_to_dense_batch(): - x = torch.Tensor([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]]) - batch = torch.tensor([0, 0, 1, 2, 2, 2]) - - expected = torch.Tensor([ - [[1, 2], [3, 4], [0, 0]], - [[5, 6], [0, 0], [0, 0]], - [[7, 8], [9, 10], [11, 12]], - ]) - - out, mask = to_dense_batch(x, batch) - assert out.size() == (3, 3, 2) - assert torch.equal(out, expected) - assert mask.tolist() == [[1, 1, 0], [1, 0, 0], [1, 1, 1]] - - if is_full_test(): - jit = torch.jit.script(to_dense_batch) - out, mask = jit(x, batch) - assert torch.equal(out, expected) - assert mask.tolist() == [[1, 1, 0], [1, 0, 0], [1, 1, 1]] - - out, mask = to_dense_batch(x, batch, max_num_nodes=2) - assert out.size() == (3, 2, 2) - assert torch.equal(out, expected[:, :2]) - assert mask.tolist() == [[1, 1], [1, 0], [1, 1]] - - out, mask = to_dense_batch(x, batch, max_num_nodes=5) - assert out.size() == (3, 5, 2) - assert torch.equal(out[:, :3], expected) - assert mask.tolist() == [[1, 1, 0, 0, 0], [1, 0, 0, 0, 0], [1, 1, 1, 0, 0]] - - out, mask = to_dense_batch(x) - assert out.size() == (1, 6, 2) - assert torch.equal(out[0], x) - assert mask.tolist() == [[1, 1, 1, 1, 1, 1]] - - out, mask = to_dense_batch(x, max_num_nodes=2) - assert out.size() == (1, 2, 2) - assert torch.equal(out[0], x[:2]) - assert mask.tolist() == [[1, 1]] - - out, mask = to_dense_batch(x, max_num_nodes=10) - assert out.size() == (1, 10, 2) - assert torch.equal(out[0, :6], x) - assert mask.tolist() == [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0]] - - out, mask = to_dense_batch(x, batch, batch_size=4) - assert out.size() == (4, 3, 2) diff --git a/pytorch_geometric-2.3.1/test/utils/test_trim_to_layer.py b/pytorch_geometric-2.3.1/test/utils/test_trim_to_layer.py deleted file mode 100644 index 64c9916..0000000 --- a/pytorch_geometric-2.3.1/test/utils/test_trim_to_layer.py +++ /dev/null @@ -1,129 +0,0 @@ -from typing import List, Optional - -import torch -from torch import Tensor - -from torch_geometric.data import Data -from torch_geometric.loader import NeighborLoader -from torch_geometric.nn import GraphConv -from torch_geometric.testing import withPackage -from torch_geometric.utils import trim_to_layer - - -def 
test_trim_to_layer_basic(): - x = torch.arange(4) - edge_index = torch.tensor([[1, 2, 3], [0, 1, 2]]) - edge_weight = torch.arange(3) - - num_sampled_nodes_per_hop = [1, 1, 1, 1] - num_sampled_edges_per_hop = [1, 1, 1] - - x, edge_index, edge_weight = trim_to_layer( - layer=0, - num_sampled_nodes_per_hop=num_sampled_nodes_per_hop, - num_sampled_edges_per_hop=num_sampled_edges_per_hop, - x=x, - edge_index=edge_index, - edge_attr=edge_weight, - ) - assert torch.equal(x, torch.arange(4)) - assert edge_index.tolist() == [[1, 2, 3], [0, 1, 2]] - assert torch.equal(edge_weight, torch.arange(3)) - - x, edge_index, edge_weight = trim_to_layer( - layer=1, - num_sampled_nodes_per_hop=num_sampled_nodes_per_hop, - num_sampled_edges_per_hop=num_sampled_edges_per_hop, - x=x, - edge_index=edge_index, - edge_attr=edge_weight, - ) - assert torch.equal(x, torch.arange(3)) - assert edge_index.tolist() == [[1, 2], [0, 1]] - assert torch.equal(edge_weight, torch.arange(2)) - - x, edge_index, edge_weight = trim_to_layer( - layer=2, - num_sampled_nodes_per_hop=num_sampled_nodes_per_hop, - num_sampled_edges_per_hop=num_sampled_edges_per_hop, - x=x, - edge_index=edge_index, - edge_attr=edge_weight, - ) - assert torch.equal(x, torch.arange(2)) - assert edge_index.tolist() == [[1], [0]] - assert torch.equal(edge_weight, torch.arange(1)) - - -def test_trim_to_layer_hetero(): - x = {'v': torch.arange(4)} - edge_index = {('v', 'to', 'v'): torch.tensor([[1, 2, 3], [0, 1, 2]])} - edge_weight = {('v', 'to', 'v'): torch.arange(3)} - - num_sampled_nodes_per_hop = {'v': [1, 1, 1, 1]} - num_sampled_edges_per_hop = {('v', 'to', 'v'): [1, 1, 1]} - - x, edge_index, edge_weight = trim_to_layer( - layer=1, - num_sampled_nodes_per_hop=num_sampled_nodes_per_hop, - num_sampled_edges_per_hop=num_sampled_edges_per_hop, - x=x, - edge_index=edge_index, - edge_attr=edge_weight, - ) - assert torch.equal(x['v'], torch.arange(3)) - assert edge_index['v', 'to', 'v'].tolist() == [[1, 2], [0, 1]] - assert torch.equal(edge_weight['v', 'to', 'v'], torch.arange(2)) - - -class GNN(torch.nn.Module): - def __init__(self, num_layers: int): - super().__init__() - - self.convs = torch.nn.ModuleList( - GraphConv(16, 16) for _ in range(num_layers)) - - def forward( - self, - x: Tensor, - edge_index: Tensor, - edge_weight: Tensor, - num_sampled_nodes: Optional[List[int]] = None, - num_sampled_edges: Optional[List[int]] = None, - ) -> Tensor: - for i, conv in enumerate(self.convs): - if num_sampled_nodes is not None: - x, edge_index, edge_weight = trim_to_layer( - i, num_sampled_nodes, num_sampled_edges, x, edge_index, - edge_weight) - x = conv(x, edge_index, edge_weight) - return x - - -@withPackage('pyg_lib') -def test_trim_to_layer_with_neighbor_loader(): - x = torch.randn(14, 16) - edge_index = torch.tensor([ - [2, 3, 4, 5, 7, 7, 10, 11, 12, 13], - [0, 1, 2, 3, 2, 3, 7, 7, 7, 7], - ]) - edge_weight = torch.rand(edge_index.size(1)) - data = Data(x=x, edge_index=edge_index, edge_weight=edge_weight) - - loader = NeighborLoader( - data, - num_neighbors=[1, 2, 4], - batch_size=2, - shuffle=False, - ) - batch = next(iter(loader)) - - model = GNN(num_layers=3) - out1 = model(batch.x, batch.edge_index, batch.edge_weight)[:2] - assert out1.size() == (2, 16) - - out2 = model(batch.x, batch.edge_index, batch.edge_weight, - batch.num_sampled_nodes, batch.num_sampled_edges)[:2] - assert out2.size() == (2, 16) - - assert torch.allclose(out1, out2) diff --git a/pytorch_geometric-2.3.1/torch_geometric/__init__.py b/pytorch_geometric-2.3.1/torch_geometric/__init__.py 
deleted file mode 100644 index b44b5dd..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/__init__.py +++ /dev/null @@ -1,37 +0,0 @@ -import torch_geometric.utils -import torch_geometric.data -import torch_geometric.sampler -import torch_geometric.loader -import torch_geometric.transforms -import torch_geometric.datasets -import torch_geometric.nn -import torch_geometric.explain -import torch_geometric.profile - -from .seed import seed_everything -from .home import get_home_dir, set_home_dir -from .compile import compile -from .debug import is_debug_enabled, debug, set_debug -from .experimental import (is_experimental_mode_enabled, experimental_mode, - set_experimental_mode) -from .lazy_loader import LazyLoader - -contrib = LazyLoader('contrib', globals(), 'torch_geometric.contrib') -graphgym = LazyLoader('graphgym', globals(), 'torch_geometric.graphgym') - -__version__ = '2.3.0' - -__all__ = [ - 'seed_everything', - 'get_home_dir', - 'set_home_dir', - 'compile', - 'is_debug_enabled', - 'debug', - 'set_debug', - 'is_experimental_mode_enabled', - 'experimental_mode', - 'set_experimental_mode', - 'torch_geometric', - '__version__', -] diff --git a/pytorch_geometric-2.3.1/torch_geometric/contrib/explain/__init__.py b/pytorch_geometric-2.3.1/torch_geometric/contrib/explain/__init__.py deleted file mode 100644 index 3f66ffc..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/contrib/explain/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from .graphmask_explainer import GraphMaskExplainer -from .pgm_explainer import PGMExplainer - -__all__ = classes = [ - 'GraphMaskExplainer', - 'PGMExplainer', -] diff --git a/pytorch_geometric-2.3.1/torch_geometric/contrib/explain/graphmask_explainer.py b/pytorch_geometric-2.3.1/torch_geometric/contrib/explain/graphmask_explainer.py deleted file mode 100644 index 8d95313..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/contrib/explain/graphmask_explainer.py +++ /dev/null @@ -1,513 +0,0 @@ -import math -from typing import Optional, Union - -import numpy as np -import torch -import torch.nn.functional as F -from torch import Tensor, sigmoid -from torch.nn import LayerNorm, Linear, Parameter, ReLU, Sequential, init -from tqdm import tqdm - -from torch_geometric.explain import Explanation -from torch_geometric.explain.algorithm import ExplainerAlgorithm -from torch_geometric.explain.config import ( - MaskType, - ModelMode, - ModelReturnType, - ModelTaskLevel, -) -from torch_geometric.nn import MessagePassing - - -def explain_message(self, out, x_i, x_j): - norm = Sequential(LayerNorm(out.size(-1)).to(out.device), ReLU()) - basis_messages = norm(out) - - if getattr(self, 'message_scale', None) is not None: - basis_messages = basis_messages * self.message_scale.unsqueeze(-1) - - if self.message_replacement is not None: - if basis_messages.shape == self.message_replacement.shape: - basis_messages = (basis_messages + - (1 - self.message_scale).unsqueeze(-1) * - self.message_replacement) - else: - basis_messages = (basis_messages + - ((1 - self.message_scale).unsqueeze(-1) * - self.message_replacement.unsqueeze(0))) - - self.latest_messages = basis_messages - self.latest_source_embeddings = x_j - self.latest_target_embeddings = x_i - - return basis_messages - - -class GraphMaskExplainer(ExplainerAlgorithm): - r"""The GraphMask-Explainer model from the `"Interpreting Graph Neural - Networks for NLP With Differentiable Edge Masking" - `_ paper for identifying layer-wise - compact subgraph structures and node features that play a crucial role in - the predictions 
made by a GNN. - - .. note:: - - For an example of using :class:`GraphMaskExplainer`, - see `examples/contrib/graphmask_explainer.py - `_. - - Args: - num_layers (int): The number of layers to use. - epochs (int, optional): The number of epochs to train. - (default: :obj:`100`) - lr (float, optional): The learning rate to apply. - (default: :obj:`0.01`) - penalty_scaling (int, optional): Scaling value of penalty term. Value - must lie between 0 and 10. (default: :obj:`5`) - lambda_optimizer_lr (float, optional): The learning rate to optimize - the Lagrange multiplier. (default: :obj:`1e-2`) - init_lambda (float, optional): The Lagrange multiplier. Value must lie - between :obj:`0` and `1`. (default: :obj:`0.55`) - allowance (float, optional): A float value between :obj:`0` and - :obj:`1` denotes tolerance level. (default: :obj:`0.03`) - layer_type (str, optional): The type of GNN layer being used in the GNN - model. (default: :obj:`GCN`) - log (bool, optional): If set to :obj:`False`, will not log any - learning progress. (default: :obj:`True`) - **kwargs (optional): Additional hyper-parameters to override default - settings in - :attr:`~torch_geometric.nn.models.GraphMaskExplainer.coeffs`. - """ - coeffs = { - 'node_feat_size': 1.0, - 'node_feat_reduction': 'mean', - 'node_feat_ent': 0.1, - 'EPS': 1e-15, - } - - def __init__( - self, - num_layers: int, - epochs: int = 100, - lr: float = 0.01, - penalty_scaling: int = 5, - lambda_optimizer_lr: int = 1e-2, - init_lambda: int = 0.55, - allowance: int = 0.03, - layer_type: str = 'GCN', - allow_multiple_explanations: bool = False, - log: bool = True, - **kwargs, - ): - super().__init__() - assert layer_type in ['GCN', 'GAT', 'FastRGCN'] - assert 0 <= penalty_scaling <= 10 - assert 0 <= init_lambda <= 1 - assert 0 <= allowance <= 1 - - self.num_layers = num_layers - self.init_lambda = init_lambda - self.lambda_optimizer_lr = lambda_optimizer_lr - self.penalty_scaling = penalty_scaling - self.allowance = allowance - self.layer_type = layer_type - self.allow_multiple_explanations = allow_multiple_explanations - self.epochs = epochs - self.lr = lr - self.log = log - self.coeffs.update(kwargs) - - def forward( - self, - model: torch.nn.Module, - x: Tensor, - edge_index: Tensor, - *, - target: Tensor, - index: Optional[Union[int, Tensor]] = None, - **kwargs, - ) -> Explanation: - - hard_node_mask = None - - if self.model_config.task_level == ModelTaskLevel.node: - hard_node_mask, hard_edge_mask = self._get_hard_masks( - model, index, edge_index, num_nodes=x.size(0)) - self.train_explainer(model, x, edge_index, target=target, index=index, - **kwargs) - node_mask = self._post_process_mask(self.node_feat_mask, - hard_node_mask, apply_sigmoid=True) - edge_mask = self.explain(model, index=index) - edge_mask = edge_mask[:edge_index.size(1)] - - return Explanation(node_mask=node_mask, edge_mask=edge_mask) - - def supports(self) -> bool: - return True - - def hard_concrete(self, input_element, summarize_penalty=True, beta=1 / 3, - gamma=-0.2, zeta=1.2, loc_bias=2, min_val=0, max_val=1, - training=True) -> Union[Tensor, Tensor]: - input_element = input_element + loc_bias - - if training: - u = torch.empty_like(input_element).uniform_(1e-6, 1.0 - 1e-6) - - s = sigmoid( - (torch.log(u) - torch.log(1 - u) + input_element) / beta) - - penalty = sigmoid(input_element - - beta * np.math.log(-gamma / zeta)) - else: - s = sigmoid(input_element) - penalty = torch.zeros_like(input_element) - - if summarize_penalty: - penalty = penalty.mean() - - s = s * (zeta - gamma) 
+ gamma - - clipped_s = s.clamp(min_val, max_val) - - clip_value = (torch.min(clipped_s) + torch.max(clipped_s)) / 2 - hard_concrete = (clipped_s > clip_value).float() - clipped_s = clipped_s + (hard_concrete - clipped_s).detach() - - return clipped_s, penalty - - def set_masks(self, i_dim, j_dim, h_dim, x, device): - if self.layer_type == 'GCN' or self.layer_type == 'GAT': - i_dim = j_dim - (num_nodes, num_feat), std = x.size(), 0.1 - self.feat_mask_type = self.explainer_config.node_mask_type - - if self.feat_mask_type == MaskType.attributes: - self.node_feat_mask = torch.nn.Parameter( - torch.randn(num_nodes, num_feat, device=device) * std) - elif self.feat_mask_type == MaskType.object: - self.node_feat_mask = torch.nn.Parameter( - torch.randn(num_nodes, 1, device=device) * std) - else: - self.node_feat_mask = torch.nn.Parameter( - torch.randn(1, num_feat, device=device) * std) - - baselines, self.gates, full_biases = [], torch.nn.ModuleList(), [] - - for v_dim, m_dim, h_dim in zip(i_dim, j_dim, h_dim): - self.transform, self.layer_norm = [], [] - input_dims = [v_dim, m_dim, v_dim] - for _, input_dim in enumerate(input_dims): - self.transform.append( - Linear(input_dim, h_dim, bias=False).to(device)) - self.layer_norm.append(LayerNorm(h_dim).to(device)) - - self.transforms = torch.nn.ModuleList(self.transform) - self.layer_norms = torch.nn.ModuleList(self.layer_norm) - - self.full_bias = Parameter( - torch.tensor(h_dim, dtype=torch.float, device=device)) - full_biases.append(self.full_bias) - - self.reset_parameters(input_dims, h_dim) - - self.non_linear = ReLU() - self.output_layer = Linear(h_dim, 1).to(device) - - gate = [ - self.transforms, self.layer_norms, self.non_linear, - self.output_layer - ] - self.gates.extend(gate) - - baseline = torch.tensor(m_dim, dtype=torch.float, device=device) - stdv = 1. 
/ math.sqrt(m_dim) - baseline.uniform_(-stdv, stdv) - baseline = torch.nn.Parameter(baseline) - baselines.append(baseline) - - full_biases = torch.nn.ParameterList(full_biases) - self.full_biases = full_biases - - baselines = torch.nn.ParameterList(baselines) - self.baselines = baselines - - for parameter in self.parameters(): - parameter.requires_grad = False - - def enable_layer(self, layer): - for d in range(layer * 4, (layer * 4) + 4): - for parameter in self.gates[d].parameters(): - parameter.requires_grad = True - self.full_biases[layer].requires_grad = True - self.baselines[layer].requires_grad = True - - def reset_parameters(self, input_dims, h_dim): - fan_in = sum(input_dims) - - std = math.sqrt(2.0 / float(fan_in + h_dim)) - a = math.sqrt(3.0) * std - - for transform in self.transforms: - init._no_grad_uniform_(transform.weight, -a, a) - - init.zeros_(self.full_bias) - - for layer_norm in self.layer_norms: - layer_norm.reset_parameters() - - def _loss_regression(self, y_hat: Tensor, y: Tensor) -> Tensor: - assert self.model_config.return_type == ModelReturnType.raw - return F.mse_loss(y_hat, y) - - def _loss_binary_classification(self, y_hat: Tensor, y: Tensor) -> Tensor: - if self.model_config.return_type == ModelReturnType.raw: - loss_fn = F.binary_cross_entropy_with_logits - elif self.model_config.return_type == ModelReturnType.probs: - loss_fn = F.binary_cross_entropy - else: - assert False - - return loss_fn(y_hat.view_as(y), y.float()) - - def _loss_multiclass_classification( - self, - y_hat: Tensor, - y: Tensor, - ) -> Tensor: - if self.model_config.return_type == ModelReturnType.raw: - loss_fn = F.cross_entropy - elif self.model_config.return_type == ModelReturnType.probs: - loss_fn = F.nll_loss - y_hat = y_hat.log() - elif self.model_config.return_type == ModelReturnType.log_probs: - loss_fn = F.nll_loss - else: - assert False - - return loss_fn(y_hat, y) - - def _loss(self, y_hat: Tensor, y: Tensor, penalty) -> Tensor: - if self.model_config.mode == ModelMode.binary_classification: - loss = self._loss_binary_classification(y_hat, y) - elif self.model_config.mode == ModelMode.multiclass_classification: - loss = self._loss_multiclass_classification(y_hat, y) - elif self.model_config.mode == ModelMode.regression: - loss = self._loss_regression(y_hat, y) - else: - assert False - - g = torch.relu(loss - self.allowance).mean() - f = penalty * self.penalty_scaling - - loss = f + F.softplus(self.lambda_op) * g - - m = self.node_feat_mask.sigmoid() - node_feat_reduce = getattr(torch, self.coeffs['node_feat_reduction']) - loss = loss + self.coeffs['node_feat_size'] * node_feat_reduce(m) - ent = -m * torch.log(m + self.coeffs['EPS']) - ( - 1 - m) * torch.log(1 - m + self.coeffs['EPS']) - loss = loss + self.coeffs['node_feat_ent'] * ent.mean() - - return loss - - def freeze_model(self, module): - for param in module.parameters(): - param.requires_grad = False - - def _set_flags(self, model): - for module in model.modules(): - if isinstance(module, MessagePassing): - module.explain_message = explain_message.__get__( - module, MessagePassing) - module.explain = True - - def _inject_messages(self, model: torch.nn.Module, message_scale, - message_replacement, set=False): - i = 0 - for module in model.modules(): - if isinstance(module, MessagePassing): - if not set: - module.message_scale = message_scale[i] - module.message_replacement = message_replacement[i] - i = i + 1 - else: - module.message_scale = None - module.message_replacement = None - - def train_explainer(self, model: 
torch.nn.Module, x: Tensor, - edge_index: Tensor, *, target: Tensor, - index: Optional[Union[int, Tensor]] = None, **kwargs): - - if not isinstance(index, Tensor) and not isinstance(index, int) \ - and index is not None: - raise ValueError("'index' parameter can only be a 'Tensor', " - "'integer' or set to 'None' instead.") - - self.freeze_model(model) - self._set_flags(model) - - input_dims, output_dims = [], [] - for module in model.modules(): - if isinstance(module, MessagePassing): - input_dims.append(module.in_channels) - output_dims.append(module.out_channels) - - self.set_masks(input_dims, output_dims, output_dims, x, x.device) - - optimizer = torch.optim.Adam(self.parameters(), lr=self.lr) - - for layer in reversed(list(range(self.num_layers))): - if self.log: - pbar = tqdm(total=self.epochs) - if self.model_config.task_level == ModelTaskLevel.node: - pbar.set_description( - f'Train explainer for node(s) {index} with layer ' - f'{layer}') - elif self.model_config.task_level == ModelTaskLevel.edge: - pbar.set_description( - f"Train explainer for edge-level task with layer " - f"{layer}") - else: - pbar.set_description( - f'Train explainer for graph {index} with layer ' - f'{layer}') - self.enable_layer(layer) - for epoch in range(self.epochs): - with torch.no_grad(): - model(x, edge_index, **kwargs) - gates, total_penalty = [], 0 - latest_source_embeddings, latest_messages = [], [] - latest_target_embeddings = [] - for module in model.modules(): - if isinstance(module, MessagePassing): - latest_source_embeddings.append( - module.latest_source_embeddings) - latest_messages.append(module.latest_messages) - latest_target_embeddings.append( - module.latest_target_embeddings) - gate_input = [ - latest_source_embeddings, latest_messages, - latest_target_embeddings - ] - for i in range(self.num_layers): - output = self.full_biases[i] - for j in range(len(gate_input)): - partial = self.gates[i * 4][j](gate_input[j][i]) - result = self.gates[(i * 4) + 1][j](partial) - output = output + result - relu_output = self.gates[(i * 4) + 2](output / - len(gate_input)) - sampling_weights = self.gates[(i * 4) + - 3](relu_output).squeeze( - dim=-1) - sampling_weights, penalty = self.hard_concrete( - sampling_weights) - gates.append(sampling_weights) - total_penalty += penalty - - self._inject_messages(model, gates, self.baselines) - - self.lambda_op = torch.tensor(self.init_lambda, - requires_grad=True) - optimizer_lambda = torch.optim.RMSprop( - [self.lambda_op], lr=self.lambda_optimizer_lr, - centered=True) - - optimizer.zero_grad() - optimizer_lambda.zero_grad() - - h = x * self.node_feat_mask.sigmoid() - y_hat, y = model(x=h, edge_index=edge_index, **kwargs), target - - if self.model_config.task_level == ModelTaskLevel.node \ - or self.model_config.task_level == ModelTaskLevel.edge: - if index is not None: - y_hat, y = y_hat[index], y[index] - - self._inject_messages(model, gates, self.baselines, True) - - loss = self._loss(y_hat, y, total_penalty) - - loss.backward() - optimizer.step() - self.lambda_op.grad *= -1 - optimizer_lambda.step() - - if self.lambda_op.item() < -2: - self.lambda_op.data = torch.full_like( - self.lambda_op.data, -2) - elif self.lambda_op.item() > 30: - self.lambda_op.data = torch.full_like( - self.lambda_op.data, 30) - - if self.log: - pbar.update(1) - - if self.log: - pbar.close() - - def explain(self, model: torch.nn.Module, *, - index: Optional[Union[int, Tensor]] = None) -> Tensor: - - if not isinstance(index, Tensor) and not isinstance(index, int) \ - and index is not 
None: - raise ValueError("'index' parameter can only be a 'Tensor', " - "'integer' or set to 'None' instead.") - - self.freeze_model(model) - self._set_flags(model) - - with torch.no_grad(): - latest_source_embeddings, latest_messages = [], [] - latest_target_embeddings = [] - for module in model.modules(): - if isinstance(module, MessagePassing): - latest_source_embeddings.append( - module.latest_source_embeddings) - latest_messages.append(module.latest_messages) - latest_target_embeddings.append( - module.latest_target_embeddings) - gate_input = [ - latest_source_embeddings, latest_messages, - latest_target_embeddings - ] - if self.log: - pbar = tqdm(total=self.num_layers) - for i in range(self.num_layers): - if self.log: - pbar.set_description("Explain") - output = self.full_biases[i] - for j in range(len(gate_input)): - partial = self.gates[i * 4][j](gate_input[j][i]) - result = self.gates[(i * 4) + 1][j](partial) - output = output + result - relu_output = self.gates[(i * 4) + 2](output / len(gate_input)) - sampling_weights = self.gates[(i * 4) + - 3](relu_output).squeeze(dim=-1) - sampling_weights, _ = self.hard_concrete( - sampling_weights, training=False) - if i == 0: - edge_weight = sampling_weights - else: - if (edge_weight.size(-1) != sampling_weights.size(-1) - and self.layer_type == 'GAT'): - sampling_weights = F.pad( - input=sampling_weights, - pad=(0, edge_weight.size(-1) - - sampling_weights.size(-1), 0, 0), - mode='constant', value=0) - edge_weight = torch.cat((edge_weight, sampling_weights), 0) - if self.log: - pbar.update(1) - if self.log: - pbar.close() - - edge_mask = edge_weight.view(-1, - edge_weight.size(0) // self.num_layers) - edge_mask = torch.mean(edge_mask, 0) - - return edge_mask - - def __repr__(self): - return f'{self.__class__.__name__}()' diff --git a/pytorch_geometric-2.3.1/torch_geometric/data/__init__.py b/pytorch_geometric-2.3.1/torch_geometric/data/__init__.py deleted file mode 100644 index a4787b1..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/data/__init__.py +++ /dev/null @@ -1,87 +0,0 @@ -# flake8: noqa - -from .feature_store import FeatureStore, TensorAttr -from .graph_store import GraphStore, EdgeAttr -from .data import Data -from .hetero_data import HeteroData -from .batch import Batch -from .temporal import TemporalData -from .dataset import Dataset -from .in_memory_dataset import InMemoryDataset -from .makedirs import makedirs -from .download import download_url -from .extract import extract_tar, extract_zip, extract_bz2, extract_gz - -from torch_geometric.lazy_loader import LazyLoader - -data_classes = [ - 'Data', - 'HeteroData', - 'Batch', - 'TemporalData', - 'Dataset', - 'InMemoryDataset', -] - -remote_backend_classes = [ - 'FeatureStore', - 'GraphStore', - 'TensorAttr', - 'EdgeAttr', -] - -helper_functions = [ - 'makedirs', - 'download_url', - 'extract_tar', - 'extract_zip', - 'extract_bz2', - 'extract_gz', -] - -__all__ = data_classes + remote_backend_classes + helper_functions - -lightning = LazyLoader('lightning', globals(), - 'torch_geometric.data.lightning') - -from torch_geometric.deprecation import deprecated -from torch_geometric.loader import NeighborSampler -from torch_geometric.loader import ClusterData -from torch_geometric.loader import ClusterLoader -from torch_geometric.loader import GraphSAINTSampler -from torch_geometric.loader import GraphSAINTNodeSampler -from torch_geometric.loader import GraphSAINTEdgeSampler -from torch_geometric.loader import GraphSAINTRandomWalkSampler -from torch_geometric.loader import 
ShaDowKHopSampler -from torch_geometric.loader import RandomNodeLoader -from torch_geometric.loader import DataLoader -from torch_geometric.loader import DataListLoader -from torch_geometric.loader import DenseDataLoader - -NeighborSampler = deprecated("use 'loader.NeighborSampler' instead", - 'data.NeighborSampler')(NeighborSampler) -ClusterData = deprecated("use 'loader.ClusterData' instead", - 'data.ClusterData')(ClusterData) -ClusterLoader = deprecated("use 'loader.ClusterLoader' instead", - 'data.ClusterLoader')(ClusterLoader) -GraphSAINTSampler = deprecated("use 'loader.GraphSAINTSampler' instead", - 'data.GraphSAINTSampler')(GraphSAINTSampler) -GraphSAINTNodeSampler = deprecated( - "use 'loader.GraphSAINTNodeSampler' instead", - 'data.GraphSAINTNodeSampler')(GraphSAINTNodeSampler) -GraphSAINTEdgeSampler = deprecated( - "use 'loader.GraphSAINTEdgeSampler' instead", - 'data.GraphSAINTEdgeSampler')(GraphSAINTEdgeSampler) -GraphSAINTRandomWalkSampler = deprecated( - "use 'loader.GraphSAINTRandomWalkSampler' instead", - 'data.GraphSAINTRandomWalkSampler')(GraphSAINTRandomWalkSampler) -ShaDowKHopSampler = deprecated("use 'loader.ShaDowKHopSampler' instead", - 'data.ShaDowKHopSampler')(ShaDowKHopSampler) -RandomNodeSampler = deprecated("use 'loader.RandomNodeLoader' instead", - 'data.RandomNodeSampler')(RandomNodeLoader) -DataLoader = deprecated("use 'loader.DataLoader' instead", - 'data.DataLoader')(DataLoader) -DataListLoader = deprecated("use 'loader.DataListLoader' instead", - 'data.DataListLoader')(DataListLoader) -DenseDataLoader = deprecated("use 'loader.DenseDataLoader' instead", - 'data.DenseDataLoader')(DenseDataLoader) diff --git a/pytorch_geometric-2.3.1/torch_geometric/data/data.py b/pytorch_geometric-2.3.1/torch_geometric/data/data.py deleted file mode 100644 index c4d0ec3..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/data/data.py +++ /dev/null @@ -1,992 +0,0 @@ -import copy -import warnings -from collections.abc import Mapping, Sequence -from dataclasses import dataclass -from itertools import chain -from typing import ( - Any, - Callable, - Dict, - Iterable, - List, - NamedTuple, - Optional, - Tuple, - Union, -) - -import numpy as np -import torch -from torch import Tensor - -from torch_geometric.data import EdgeAttr, FeatureStore, GraphStore, TensorAttr -from torch_geometric.data.feature_store import _field_status -from torch_geometric.data.graph_store import EdgeLayout -from torch_geometric.data.storage import ( - BaseStorage, - EdgeStorage, - GlobalStorage, - NodeStorage, -) -from torch_geometric.deprecation import deprecated -from torch_geometric.typing import ( - EdgeTensorType, - EdgeType, - FeatureTensorType, - NodeType, - OptTensor, - SparseTensor, -) -from torch_geometric.utils import select, subgraph - - -class BaseData(object): - def __getattr__(self, key: str) -> Any: - raise NotImplementedError - - def __setattr__(self, key: str, value: Any): - raise NotImplementedError - - def __delattr__(self, key: str): - raise NotImplementedError - - def __getitem__(self, key: str) -> Any: - raise NotImplementedError - - def __setitem__(self, key: str, value: Any): - raise NotImplementedError - - def __delitem__(self, key: str): - raise NotImplementedError - - def __copy__(self): - raise NotImplementedError - - def __deepcopy__(self, memo): - raise NotImplementedError - - def __repr__(self) -> str: - raise NotImplementedError - - def stores_as(self, data: 'BaseData'): - raise NotImplementedError - - @property - def stores(self) -> List[BaseStorage]: - raise 
NotImplementedError - - @property - def node_stores(self) -> List[NodeStorage]: - raise NotImplementedError - - @property - def edge_stores(self) -> List[EdgeStorage]: - raise NotImplementedError - - def to_dict(self) -> Dict[str, Any]: - r"""Returns a dictionary of stored key/value pairs.""" - raise NotImplementedError - - def to_namedtuple(self) -> NamedTuple: - r"""Returns a :obj:`NamedTuple` of stored key/value pairs.""" - raise NotImplementedError - - def update(self, data: 'BaseData') -> 'BaseData': - r"""Updates the data object with the elements from another data object. - """ - raise NotImplementedError - - def __cat_dim__(self, key: str, value: Any, *args, **kwargs) -> Any: - r"""Returns the dimension for which the value :obj:`value` of the - attribute :obj:`key` will get concatenated when creating mini-batches - using :class:`torch_geometric.loader.DataLoader`. - - .. note:: - - This method is for internal use only, and should only be overridden - in case the mini-batch creation process is corrupted for a specific - attribute. - """ - raise NotImplementedError - - def __inc__(self, key: str, value: Any, *args, **kwargs) -> Any: - r"""Returns the incremental count to cumulatively increase the value - :obj:`value` of the attribute :obj:`key` when creating mini-batches - using :class:`torch_geometric.loader.DataLoader`. - - .. note:: - - This method is for internal use only, and should only be overridden - in case the mini-batch creation process is corrupted for a specific - attribute. - """ - raise NotImplementedError - - def debug(self): - raise NotImplementedError - - ########################################################################### - - @property - def keys(self) -> List[str]: - r"""Returns a list of all graph attribute names.""" - out = [] - for store in self.stores: - out += list(store.keys()) - return list(set(out)) - - def __len__(self) -> int: - r"""Returns the number of graph attributes.""" - return len(self.keys) - - def __contains__(self, key: str) -> bool: - r"""Returns :obj:`True` if the attribute :obj:`key` is present in the - data.""" - return key in self.keys - - def __getstate__(self) -> Dict[str, Any]: - return self.__dict__ - - def __setstate__(self, mapping: Dict[str, Any]): - for key, value in mapping.items(): - self.__dict__[key] = value - - @property - def num_nodes(self) -> Optional[int]: - r"""Returns the number of nodes in the graph. - - .. note:: - The number of nodes in the data object is automatically inferred - in case node-level attributes are present, *e.g.*, :obj:`data.x`. - In some cases, however, a graph may only be given without any - node-level attributes. - :pyg:`PyG` then *guesses* the number of nodes according to - :obj:`edge_index.max().item() + 1`. - However, in case there exists isolated nodes, this number does not - have to be correct which can result in unexpected behavior. - Thus, we recommend to set the number of nodes in your data object - explicitly via :obj:`data.num_nodes = ...`. - You will be given a warning that requests you to do so. - """ - try: - return sum([v.num_nodes for v in self.node_stores]) - except TypeError: - return None - - def size( - self, dim: Optional[int] = None - ) -> Union[Tuple[Optional[int], Optional[int]], Optional[int]]: - r"""Returns the size of the adjacency matrix induced by the graph.""" - size = (self.num_nodes, self.num_nodes) - return size if dim is None else size[dim] - - @property - def num_edges(self) -> int: - r"""Returns the number of edges in the graph. 
- For undirected graphs, this will return the number of bi-directional - edges, which is double the amount of unique edges.""" - return sum([v.num_edges for v in self.edge_stores]) - - def node_attrs(self) -> List[str]: - r"""Returns all node-level tensor attribute names.""" - return list(set(chain(*[s.node_attrs() for s in self.node_stores]))) - - def edge_attrs(self) -> List[str]: - r"""Returns all edge-level tensor attribute names.""" - return list(set(chain(*[s.edge_attrs() for s in self.edge_stores]))) - - def is_coalesced(self) -> bool: - r"""Returns :obj:`True` if edge indices :obj:`edge_index` are sorted - and do not contain duplicate entries.""" - return all([store.is_coalesced() for store in self.edge_stores]) - - def generate_ids(self): - r"""Generates and sets :obj:`n_id` and :obj:`e_id` attributes to assign - each node and edge to a continuously ascending and unique ID.""" - for store in self.node_stores: - store.n_id = torch.arange(store.num_nodes) - for store in self.edge_stores: - store.e_id = torch.arange(store.num_edges) - - def coalesce(self): - r"""Sorts and removes duplicated entries from edge indices - :obj:`edge_index`.""" - for store in self.edge_stores: - store.coalesce() - return self - - def has_isolated_nodes(self) -> bool: - r"""Returns :obj:`True` if the graph contains isolated nodes.""" - return any([store.has_isolated_nodes() for store in self.edge_stores]) - - def has_self_loops(self) -> bool: - """Returns :obj:`True` if the graph contains self-loops.""" - return any([store.has_self_loops() for store in self.edge_stores]) - - def is_undirected(self) -> bool: - r"""Returns :obj:`True` if graph edges are undirected.""" - return all([store.is_undirected() for store in self.edge_stores]) - - def is_directed(self) -> bool: - r"""Returns :obj:`True` if graph edges are directed.""" - return not self.is_undirected() - - def apply_(self, func: Callable, *args: List[str]): - r"""Applies the in-place function :obj:`func`, either to all attributes - or only the ones given in :obj:`*args`.""" - for store in self.stores: - store.apply_(func, *args) - return self - - def apply(self, func: Callable, *args: List[str]): - r"""Applies the function :obj:`func`, either to all attributes or only - the ones given in :obj:`*args`.""" - for store in self.stores: - store.apply(func, *args) - return self - - def clone(self, *args: List[str]): - r"""Performs cloning of tensors, either for all attributes or only the - ones given in :obj:`*args`.""" - return copy.copy(self).apply(lambda x: x.clone(), *args) - - def contiguous(self, *args: List[str]): - r"""Ensures a contiguous memory layout, either for all attributes or - only the ones given in :obj:`*args`.""" - return self.apply(lambda x: x.contiguous(), *args) - - def to(self, device: Union[int, str], *args: List[str], - non_blocking: bool = False): - r"""Performs tensor device conversion, either for all attributes or - only the ones given in :obj:`*args`.""" - return self.apply( - lambda x: x.to(device=device, non_blocking=non_blocking), *args) - - def cpu(self, *args: List[str]): - r"""Copies attributes to CPU memory, either for all attributes or only - the ones given in :obj:`*args`.""" - return self.apply(lambda x: x.cpu(), *args) - - def cuda(self, device: Optional[Union[int, str]] = None, *args: List[str], - non_blocking: bool = False): - r"""Copies attributes to CUDA memory, either for all attributes or only - the ones given in :obj:`*args`.""" - # Some PyTorch tensor like objects require a default value for `cuda`: - device = 
'cuda' if device is None else device - return self.apply(lambda x: x.cuda(device, non_blocking=non_blocking), - *args) - - def pin_memory(self, *args: List[str]): - r"""Copies attributes to pinned memory, either for all attributes or - only the ones given in :obj:`*args`.""" - return self.apply(lambda x: x.pin_memory(), *args) - - def share_memory_(self, *args: List[str]): - r"""Moves attributes to shared memory, either for all attributes or - only the ones given in :obj:`*args`.""" - return self.apply_(lambda x: x.share_memory_(), *args) - - def detach_(self, *args: List[str]): - r"""Detaches attributes from the computation graph, either for all - attributes or only the ones given in :obj:`*args`.""" - return self.apply_(lambda x: x.detach_(), *args) - - def detach(self, *args: List[str]): - r"""Detaches attributes from the computation graph by creating a new - tensor, either for all attributes or only the ones given in - :obj:`*args`.""" - return self.apply(lambda x: x.detach(), *args) - - def requires_grad_(self, *args: List[str], requires_grad: bool = True): - r"""Tracks gradient computation, either for all attributes or only the - ones given in :obj:`*args`.""" - return self.apply_( - lambda x: x.requires_grad_(requires_grad=requires_grad), *args) - - def record_stream(self, stream: torch.cuda.Stream, *args: List[str]): - r"""Ensures that the tensor memory is not reused for another tensor - until all current work queued on :obj:`stream` has been completed, - either for all attributes or only the ones given in :obj:`*args`.""" - return self.apply_(lambda x: x.record_stream(stream), *args) - - @property - def is_cuda(self) -> bool: - r"""Returns :obj:`True` if any :class:`torch.Tensor` attribute is - stored on the GPU, :obj:`False` otherwise.""" - for store in self.stores: - for value in store.values(): - if isinstance(value, Tensor) and value.is_cuda: - return True - return False - - # Deprecated functions #################################################### - - @deprecated(details="use 'has_isolated_nodes' instead") - def contains_isolated_nodes(self) -> bool: - return self.has_isolated_nodes() - - @deprecated(details="use 'has_self_loops' instead") - def contains_self_loops(self) -> bool: - return self.has_self_loops() - - -############################################################################### - - -@dataclass -class DataTensorAttr(TensorAttr): - r"""Tensor attribute for `Data` without group name.""" - def __init__( - self, - attr_name=_field_status.UNSET, - index=None, - ): - super().__init__(None, attr_name, index) - - -@dataclass -class DataEdgeAttr(EdgeAttr): - r"""Edge attribute class for `Data` without edge type.""" - def __init__( - self, - layout: Optional[EdgeLayout] = None, - is_sorted: bool = False, - size: Optional[Tuple[int, int]] = None, - ): - super().__init__(None, layout, is_sorted, size) - - -############################################################################### - - -class Data(BaseData, FeatureStore, GraphStore): - r"""A data object describing a homogeneous graph. - The data object can hold node-level, link-level and graph-level attributes. - In general, :class:`~torch_geometric.data.Data` tries to mimic the - behavior of a regular Python dictionary. - In addition, it provides useful functionality for analyzing graph - structures, and provides basic PyTorch tensor functionalities. - See `here `__ for the accompanying - tutorial. - - .. code-block:: python - - from torch_geometric.data import Data - - data = Data(x=x, edge_index=edge_index, ...) 
- - # Add additional arguments to `data`: - data.train_idx = torch.tensor([...], dtype=torch.long) - data.test_mask = torch.tensor([...], dtype=torch.bool) - - # Analyzing the graph structure: - data.num_nodes - >>> 23 - - data.is_directed() - >>> False - - # PyTorch tensor functionality: - data = data.pin_memory() - data = data.to('cuda:0', non_blocking=True) - - Args: - x (torch.Tensor, optional): Node feature matrix with shape - :obj:`[num_nodes, num_node_features]`. (default: :obj:`None`) - edge_index (LongTensor, optional): Graph connectivity in COO format - with shape :obj:`[2, num_edges]`. (default: :obj:`None`) - edge_attr (torch.Tensor, optional): Edge feature matrix with shape - :obj:`[num_edges, num_edge_features]`. (default: :obj:`None`) - y (torch.Tensor, optional): Graph-level or node-level ground-truth - labels with arbitrary shape. (default: :obj:`None`) - pos (torch.Tensor, optional): Node position matrix with shape - :obj:`[num_nodes, num_dimensions]`. (default: :obj:`None`) - **kwargs (optional): Additional attributes. - """ - def __init__(self, x: OptTensor = None, edge_index: OptTensor = None, - edge_attr: OptTensor = None, y: OptTensor = None, - pos: OptTensor = None, **kwargs): - # `Data` doesn't support group_name, so we need to adjust `TensorAttr` - # accordingly here to avoid requiring `group_name` to be set: - super().__init__(tensor_attr_cls=DataTensorAttr) - - # `Data` doesn't support edge_type, so we need to adjust `EdgeAttr` - # accordingly here to avoid requiring `edge_type` to be set: - GraphStore.__init__(self, edge_attr_cls=DataEdgeAttr) - - self.__dict__['_store'] = GlobalStorage(_parent=self) - - if x is not None: - self.x = x - if edge_index is not None: - self.edge_index = edge_index - if edge_attr is not None: - self.edge_attr = edge_attr - if y is not None: - self.y = y - if pos is not None: - self.pos = pos - - for key, value in kwargs.items(): - setattr(self, key, value) - - def __getattr__(self, key: str) -> Any: - if '_store' not in self.__dict__: - raise RuntimeError( - "The 'data' object was created by an older version of PyG. " - "If this error occurred while loading an already existing " - "dataset, remove the 'processed/' directory in the dataset's " - "root folder and try again.") - return getattr(self._store, key) - - def __setattr__(self, key: str, value: Any): - propobj = getattr(self.__class__, key, None) - if propobj is not None and getattr(propobj, 'fset', None) is not None: - propobj.fset(self, value) - else: - setattr(self._store, key, value) - - def __delattr__(self, key: str): - delattr(self._store, key) - - # TODO consider supporting the feature store interface for - # __getitem__, __setitem__, and __delitem__ so, for example, we - # can accept key: Union[str, TensorAttr] in __getitem__. 
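The dunder methods around this point are what let `Data` mimic a regular Python dictionary, as its docstring promises: keyword construction, attribute access, and item access all resolve to the same underlying `_store`. A minimal sketch of that behavior, assuming a stock PyG 2.3 install and placeholder tensors:

```python
import torch
from torch_geometric.data import Data

data = Data(x=torch.randn(3, 8))       # keyword arguments become attributes
data['y'] = torch.tensor([1, 0, 1])    # item assignment ...
assert torch.equal(data.y, data['y'])  # ... is mirrored by attribute access

del data['y']                          # removes the key from the store
assert data.y is None                  # unset attributes resolve to None
```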
- def __getitem__(self, key: str) -> Any: - return self._store[key] - - def __setitem__(self, key: str, value: Any): - self._store[key] = value - - def __delitem__(self, key: str): - if key in self._store: - del self._store[key] - - def __copy__(self): - out = self.__class__.__new__(self.__class__) - for key, value in self.__dict__.items(): - out.__dict__[key] = value - out.__dict__['_store'] = copy.copy(self._store) - out._store._parent = out - return out - - def __deepcopy__(self, memo): - out = self.__class__.__new__(self.__class__) - for key, value in self.__dict__.items(): - out.__dict__[key] = copy.deepcopy(value, memo) - out._store._parent = out - return out - - def __repr__(self) -> str: - cls = self.__class__.__name__ - has_dict = any([isinstance(v, Mapping) for v in self._store.values()]) - - if not has_dict: - info = [size_repr(k, v) for k, v in self._store.items()] - info = ', '.join(info) - return f'{cls}({info})' - else: - info = [size_repr(k, v, indent=2) for k, v in self._store.items()] - info = ',\n'.join(info) - return f'{cls}(\n{info}\n)' - - def stores_as(self, data: 'Data'): - return self - - @property - def stores(self) -> List[BaseStorage]: - return [self._store] - - @property - def node_stores(self) -> List[NodeStorage]: - return [self._store] - - @property - def edge_stores(self) -> List[EdgeStorage]: - return [self._store] - - def to_dict(self) -> Dict[str, Any]: - return self._store.to_dict() - - def to_namedtuple(self) -> NamedTuple: - return self._store.to_namedtuple() - - def update(self, data: 'Data') -> 'Data': - for key, value in data.items(): - self[key] = value - return self - - def __cat_dim__(self, key: str, value: Any, *args, **kwargs) -> Any: - if isinstance(value, SparseTensor) and 'adj' in key: - return (0, 1) - elif 'index' in key or key == 'face': - return -1 - else: - return 0 - - def __inc__(self, key: str, value: Any, *args, **kwargs) -> Any: - if 'batch' in key: - return int(value.max()) + 1 - elif 'index' in key or key == 'face': - return self.num_nodes - else: - return 0 - - def validate(self, raise_on_error: bool = True) -> bool: - r"""Validates the correctness of the data.""" - cls_name = self.__class__.__name__ - status = True - - num_nodes = self.num_nodes - if num_nodes is None: - status = False - warn_or_raise(f"'num_nodes' is undefined in '{cls_name}'", - raise_on_error) - - if 'edge_index' in self: - if self.edge_index.dim() != 2 or self.edge_index.size(0) != 2: - status = False - warn_or_raise( - f"'edge_index' needs to be of shape [2, num_edges] in " - f"'{cls_name}' (found {self.edge_index.size()})", - raise_on_error) - - if 'edge_index' in self and self.edge_index.numel() > 0: - if self.edge_index.min() < 0: - status = False - warn_or_raise( - f"'edge_index' contains negative indices in " - f"'{cls_name}' (found {int(self.edge_index.min())})", - raise_on_error) - - if num_nodes is not None and self.edge_index.max() >= num_nodes: - status = False - warn_or_raise( - f"'edge_index' contains larger indices than the number " - f"of nodes ({num_nodes}) in '{cls_name}' " - f"(found {int(self.edge_index.max())})", raise_on_error) - - return status - - def debug(self): - pass # TODO - - def is_node_attr(self, key: str) -> bool: - r"""Returns :obj:`True` if the object at key :obj:`key` denotes a - node-level tensor attribute.""" - return self._store.is_node_attr(key) - - def is_edge_attr(self, key: str) -> bool: - r"""Returns :obj:`True` if the object at key :obj:`key` denotes an - edge-level tensor attribute.""" - return 
self._store.is_edge_attr(key) - - def subgraph(self, subset: Tensor) -> 'Data': - r"""Returns the induced subgraph given by the node indices - :obj:`subset`. - - Args: - subset (LongTensor or BoolTensor): The nodes to keep. - """ - if subset.dtype == torch.bool: - num_nodes = int(subset.sum()) - else: - num_nodes = subset.size(0) - subset = torch.unique(subset, sorted=True) - - out = subgraph(subset, self.edge_index, relabel_nodes=True, - num_nodes=self.num_nodes, return_edge_mask=True) - edge_index, _, edge_mask = out - - data = copy.copy(self) - - for key, value in self: - if key == 'edge_index': - data.edge_index = edge_index - elif key == 'num_nodes': - data.num_nodes = num_nodes - elif self.is_node_attr(key): - cat_dim = self.__cat_dim__(key, value) - data[key] = select(value, subset, dim=cat_dim) - elif self.is_edge_attr(key): - cat_dim = self.__cat_dim__(key, value) - data[key] = select(value, edge_mask, dim=cat_dim) - - return data - - def edge_subgraph(self, subset: Tensor) -> 'Data': - r"""Returns the induced subgraph given by the edge indices - :obj:`subset`. - Will currently preserve all the nodes in the graph, even if they are - isolated after subgraph computation. - - Args: - subset (LongTensor or BoolTensor): The edges to keep. - """ - data = copy.copy(self) - - for key, value in self: - if self.is_edge_attr(key): - cat_dim = self.__cat_dim__(key, value) - data[key] = select(value, subset, dim=cat_dim) - - return data - - def to_heterogeneous( - self, - node_type: Optional[Tensor] = None, - edge_type: Optional[Tensor] = None, - node_type_names: Optional[List[NodeType]] = None, - edge_type_names: Optional[List[EdgeType]] = None, - ): - r"""Converts a :class:`~torch_geometric.data.Data` object to a - heterogeneous :class:`~torch_geometric.data.HeteroData` object. - For this, node and edge attributes are splitted according to the - node-level and edge-level vectors :obj:`node_type` and - :obj:`edge_type`, respectively. - :obj:`node_type_names` and :obj:`edge_type_names` can be used to give - meaningful node and edge type names, respectively. - That is, the node_type :obj:`0` is given by :obj:`node_type_names[0]`. - If the :class:`~torch_geometric.data.Data` object was constructed via - :meth:`~torch_geometric.data.HeteroData.to_homogeneous`, the object can - be reconstructed without any need to pass in additional arguments. - - Args: - node_type (torch.Tensor, optional): A node-level vector denoting - the type of each node. (default: :obj:`None`) - edge_type (torch.Tensor, optional): An edge-level vector denoting - the type of each edge. (default: :obj:`None`) - node_type_names (List[str], optional): The names of node types. - (default: :obj:`None`) - edge_type_names (List[Tuple[str, str, str]], optional): The names - of edge types. 
(default: :obj:`None`) - """ - from torch_geometric.data import HeteroData - - if node_type is None: - node_type = self._store.get('node_type', None) - if node_type is None: - node_type = torch.zeros(self.num_nodes, dtype=torch.long) - - if node_type_names is None: - store = self._store - node_type_names = store.__dict__.get('_node_type_names', None) - if node_type_names is None: - node_type_names = [str(i) for i in node_type.unique().tolist()] - - if edge_type is None: - edge_type = self._store.get('edge_type', None) - if edge_type is None: - edge_type = torch.zeros(self.num_edges, dtype=torch.long) - - if edge_type_names is None: - store = self._store - edge_type_names = store.__dict__.get('_edge_type_names', None) - if edge_type_names is None: - edge_type_names = [] - edge_index = self.edge_index - for i in edge_type.unique().tolist(): - src, dst = edge_index[:, edge_type == i] - src_types = node_type[src].unique().tolist() - dst_types = node_type[dst].unique().tolist() - if len(src_types) != 1 and len(dst_types) != 1: - raise ValueError( - "Could not construct a 'HeteroData' object from the " - "'Data' object because single edge types span over " - "multiple node types") - edge_type_names.append((node_type_names[src_types[0]], str(i), - node_type_names[dst_types[0]])) - - # We iterate over node types to find the local node indices belonging - # to each node type. Furthermore, we create a global `index_map` vector - # that maps global node indices to local ones in the final - # heterogeneous graph: - node_ids, index_map = {}, torch.empty_like(node_type) - for i, key in enumerate(node_type_names): - node_ids[i] = (node_type == i).nonzero(as_tuple=False).view(-1) - index_map[node_ids[i]] = torch.arange(len(node_ids[i]), - device=index_map.device) - - # We iterate over edge types to find the local edge indices: - edge_ids = {} - for i, key in enumerate(edge_type_names): - edge_ids[i] = (edge_type == i).nonzero(as_tuple=False).view(-1) - - data = HeteroData() - - for i, key in enumerate(node_type_names): - for attr, value in self.items(): - if attr in {'node_type', 'edge_type', 'ptr'}: - continue - elif isinstance(value, Tensor) and self.is_node_attr(attr): - cat_dim = self.__cat_dim__(attr, value) - data[key][attr] = value.index_select(cat_dim, node_ids[i]) - - if len(data[key]) == 0: - data[key].num_nodes = node_ids[i].size(0) - - for i, key in enumerate(edge_type_names): - src, _, dst = key - for attr, value in self.items(): - if attr in {'node_type', 'edge_type', 'ptr'}: - continue - elif attr == 'edge_index': - edge_index = value[:, edge_ids[i]] - edge_index[0] = index_map[edge_index[0]] - edge_index[1] = index_map[edge_index[1]] - data[key].edge_index = edge_index - elif isinstance(value, Tensor) and self.is_edge_attr(attr): - cat_dim = self.__cat_dim__(attr, value) - data[key][attr] = value.index_select(cat_dim, edge_ids[i]) - - # Add global attributes. 
- exclude_keys = set(data.keys) | { - 'node_type', 'edge_type', 'edge_index', 'num_nodes', 'ptr' - } - for attr, value in self.items(): - if attr in exclude_keys: - continue - data[attr] = value - - return data - - ########################################################################### - - @classmethod - def from_dict(cls, mapping: Dict[str, Any]) -> 'Data': - r"""Creates a :class:`~torch_geometric.data.Data` object from a Python - dictionary.""" - return cls(**mapping) - - @property - def num_node_features(self) -> int: - r"""Returns the number of features per node in the graph.""" - return self._store.num_node_features - - @property - def num_features(self) -> int: - r"""Returns the number of features per node in the graph. - Alias for :py:attr:`~num_node_features`.""" - return self.num_node_features - - @property - def num_edge_features(self) -> int: - r"""Returns the number of features per edge in the graph.""" - return self._store.num_edge_features - - @property - def num_node_types(self) -> int: - r"""Returns the number of node types in the graph.""" - return int(self.node_type.max()) + 1 if 'node_type' in self else 1 - - @property - def num_edge_types(self) -> int: - r"""Returns the number of edge types in the graph.""" - return int(self.edge_type.max()) + 1 if 'edge_type' in self else 1 - - def __iter__(self) -> Iterable: - r"""Iterates over all attributes in the data, yielding their attribute - names and values.""" - for key, value in self._store.items(): - yield key, value - - def __call__(self, *args: List[str]) -> Iterable: - r"""Iterates over all attributes :obj:`*args` in the data, yielding - their attribute names and values. - If :obj:`*args` is not given, will iterate over all attributes.""" - for key, value in self._store.items(*args): - yield key, value - - @property - def x(self) -> Any: - return self['x'] if 'x' in self._store else None - - @property - def edge_index(self) -> Any: - return self['edge_index'] if 'edge_index' in self._store else None - - @property - def edge_weight(self) -> Any: - return self['edge_weight'] if 'edge_weight' in self._store else None - - @property - def edge_attr(self) -> Any: - return self['edge_attr'] if 'edge_attr' in self._store else None - - @property - def y(self) -> Any: - return self['y'] if 'y' in self._store else None - - @property - def pos(self) -> Any: - return self['pos'] if 'pos' in self._store else None - - @property - def batch(self) -> Any: - return self['batch'] if 'batch' in self._store else None - - # Deprecated functions #################################################### - - @property - @deprecated(details="use 'data.face.size(-1)' instead") - def num_faces(self) -> Optional[int]: - r"""Returns the number of faces in the mesh.""" - if 'face' in self._store and isinstance(self.face, Tensor): - return self.face.size(self.__cat_dim__('face', self.face)) - return None - - # FeatureStore interface ################################################## - - def _put_tensor(self, tensor: FeatureTensorType, attr: TensorAttr) -> bool: - out = self.get(attr.attr_name) - if out is not None and attr.index is not None: - out[attr.index] = tensor - else: - assert attr.index is None - setattr(self, attr.attr_name, tensor) - return True - - def _get_tensor(self, attr: TensorAttr) -> Optional[FeatureTensorType]: - tensor = getattr(self, attr.attr_name, None) - if tensor is not None: - # TODO this behavior is a bit odd, since TensorAttr requires that - # we set `index`. 
So, we assume here that indexing by `None` is - # equivalent to not indexing at all, which is not in line with - # Python semantics. - return tensor[attr.index] if attr.index is not None else tensor - return None - - def _remove_tensor(self, attr: TensorAttr) -> bool: - if hasattr(self, attr.attr_name): - delattr(self, attr.attr_name) - return True - return False - - def _get_tensor_size(self, attr: TensorAttr) -> Tuple: - return self._get_tensor(attr).size() - - def get_all_tensor_attrs(self) -> List[TensorAttr]: - r"""Obtains all feature attributes stored in `Data`.""" - return [ - TensorAttr(attr_name=name) for name in self._store.keys() - if self._store.is_node_attr(name) - ] - - # GraphStore interface #################################################### - - def _put_edge_index(self, edge_index: EdgeTensorType, - edge_attr: EdgeAttr) -> bool: - if not hasattr(self, '_edge_attrs'): - self._edge_attrs = {} - self._edge_attrs[edge_attr.layout] = edge_attr - - row, col = edge_index - - if edge_attr.layout == EdgeLayout.COO: - self.edge_index = torch.stack([row, col], dim=0) - elif edge_attr.layout == EdgeLayout.CSR: - self.adj = SparseTensor( - rowptr=row, - col=col, - sparse_sizes=edge_attr.size, - is_sorted=True, - trust_data=True, - ) - else: # edge_attr.layout == EdgeLayout.CSC: - size = edge_attr.size[::-1] if edge_attr.size is not None else None - self.adj_t = SparseTensor( - rowptr=col, - col=row, - sparse_sizes=size, - is_sorted=True, - trust_data=True, - ) - return True - - def _get_edge_index(self, edge_attr: EdgeAttr) -> Optional[EdgeTensorType]: - if edge_attr.layout == EdgeLayout.COO and 'edge_index' in self: - row, col = self.edge_index - return row, col - elif edge_attr.layout == EdgeLayout.CSR and 'adj' in self: - rowptr, col, _ = self.adj.csr() - return rowptr, col - elif edge_attr.layout == EdgeLayout.CSC and 'adj_t' in self: - colptr, row, _ = self.adj_t.csr() - return row, colptr - return None - - def _remove_edge_index(self, edge_attr: EdgeAttr) -> bool: - if edge_attr.layout == EdgeLayout.COO and 'edge_index' in self: - del self.edge_index - if hasattr(self, '_edge_attrs'): - self._edges_to_layout.pop(EdgeLayout.COO, None) - return True - elif edge_attr.layout == EdgeLayout.CSR and 'adj' in self: - del self.adj - if hasattr(self, '_edge_attrs'): - self._edges_to_layout.pop(EdgeLayout.CSR, None) - return True - elif edge_attr.layout == EdgeLayout.CSC and 'adj_t' in self: - del self.adj_t - if hasattr(self, '_edge_attrs'): - self._edges_to_layout.pop(EdgeLayout.CSC, None) - return True - return False - - def get_all_edge_attrs(self) -> List[EdgeAttr]: - edge_attrs = getattr(self, '_edge_attrs', {}) - - if 'edge_index' in self and EdgeLayout.COO not in edge_attrs: - edge_attrs[EdgeLayout.COO] = DataEdgeAttr('coo', is_sorted=False) - if 'adj' in self and EdgeLayout.CSR not in edge_attrs: - size = self.adj.sparse_sizes() - edge_attrs[EdgeLayout.CSR] = DataEdgeAttr('csr', size=size) - if 'adj_t' in self and EdgeLayout.CSC not in edge_attrs: - size = self.adj_t.sparse_sizes()[::-1] - edge_attrs[EdgeLayout.CSC] = DataEdgeAttr('csc', size=size) - - return list(edge_attrs.values()) - - -############################################################################### - - -def size_repr(key: Any, value: Any, indent: int = 0) -> str: - pad = ' ' * indent - if isinstance(value, Tensor) and value.dim() == 0: - out = value.item() - elif isinstance(value, Tensor): - out = str(list(value.size())) - elif isinstance(value, np.ndarray): - out = str(list(value.shape)) - elif 
isinstance(value, SparseTensor): - out = str(value.sizes())[:-1] + f', nnz={value.nnz()}]' - elif isinstance(value, str): - out = f"'{value}'" - elif isinstance(value, Sequence): - out = str([len(value)]) - elif isinstance(value, Mapping) and len(value) == 0: - out = '{}' - elif (isinstance(value, Mapping) and len(value) == 1 - and not isinstance(list(value.values())[0], Mapping)): - lines = [size_repr(k, v, 0) for k, v in value.items()] - out = '{ ' + ', '.join(lines) + ' }' - elif isinstance(value, Mapping): - lines = [size_repr(k, v, indent + 2) for k, v in value.items()] - out = '{\n' + ',\n'.join(lines) + '\n' + pad + '}' - else: - out = str(value) - - key = str(key).replace("'", '') - if isinstance(value, BaseStorage): - return f'{pad}\033[1m{key}\033[0m={out}' - else: - return f'{pad}{key}={out}' - - -def warn_or_raise(msg: str, raise_on_error: bool = True): - if raise_on_error: - raise ValueError(msg) - else: - warnings.warn(msg) diff --git a/pytorch_geometric-2.3.1/torch_geometric/data/feature_store.py b/pytorch_geometric-2.3.1/torch_geometric/data/feature_store.py deleted file mode 100644 index 6e2b33c..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/data/feature_store.py +++ /dev/null @@ -1,523 +0,0 @@ -r""" -This class defines the abstraction for a backend-agnostic feature store. The -goal of the feature store is to abstract away all node and edge feature memory -management so that varying implementations can allow for independent scale-out. - -This particular feature store abstraction makes a few key assumptions: -* The features we care about storing are node and edge features of a graph. - To this end, the attributes that the feature store supports include a - `group_name` (e.g. a heterogeneous node name or a heterogeneous edge type), - an `attr_name` (e.g. `x` or `edge_attr`), and an index. -* A feature can be uniquely identified from any associated attributes specified - in `TensorAttr`. - -It is the job of a feature store implementor class to handle these assumptions -properly. For example, a simple in-memory feature store implementation may -concatenate all metadata values with a feature index and use this as a unique -index in a KV store. More complicated implementations may choose to partition -features in interesting manners based on the provided metadata. - -Major TODOs for future implementation: -* Async `put` and `get` functionality -""" -import copy -from abc import abstractmethod -from dataclasses import dataclass -from enum import Enum -from typing import Any, List, Optional, Tuple, Union - -import numpy as np -import torch - -from torch_geometric.typing import FeatureTensorType, NodeType -from torch_geometric.utils.mixin import CastMixin - -_field_status = Enum("FieldStatus", "UNSET") - -# We allow indexing with a tensor, numpy array, Python slicing, or a single -# integer index. -IndexType = Union[torch.Tensor, np.ndarray, slice, int] - - -@dataclass -class TensorAttr(CastMixin): - r"""Defines the attributes of a :class:`FeatureStore` tensor. - It holds all the parameters necessary to uniquely identify a tensor from - the :class:`FeatureStore`. - - Note that the order of the attributes is important; this is the order in - which attributes must be provided for indexing calls. :class:`FeatureStore` - implementations can define a different ordering by overriding - :meth:`TensorAttr.__init__`. - """ - - # The group name that the tensor corresponds to. Defaults to UNSET. 
- group_name: Optional[NodeType] = _field_status.UNSET - - # The name of the tensor within its group. Defaults to UNSET. - attr_name: Optional[str] = _field_status.UNSET - - # The node indices the rows of the tensor correspond to. Defaults to UNSET. - index: Optional[IndexType] = _field_status.UNSET - - # Convenience methods ##################################################### - - def is_set(self, key: str) -> bool: - r"""Whether an attribute is set in :obj:`TensorAttr`.""" - assert key in self.__dataclass_fields__ - return getattr(self, key) != _field_status.UNSET - - def is_fully_specified(self) -> bool: - r"""Whether the :obj:`TensorAttr` has no unset fields.""" - return all([self.is_set(key) for key in self.__dataclass_fields__]) - - def fully_specify(self) -> 'TensorAttr': - r"""Sets all :obj:`UNSET` fields to :obj:`None`.""" - for key in self.__dataclass_fields__: - if not self.is_set(key): - setattr(self, key, None) - return self - - def update(self, attr: 'TensorAttr') -> 'TensorAttr': - r"""Updates an :class:`TensorAttr` with set attributes from another - :class:`TensorAttr`.""" - for key in self.__dataclass_fields__: - if attr.is_set(key): - setattr(self, key, getattr(attr, key)) - return self - - -class AttrView(CastMixin): - r"""Defines a view of a :class:`FeatureStore` that is obtained from a - specification of attributes on the feature store. The view stores a - reference to the backing feature store as well as a :class:`TensorAttr` - object that represents the view's state. - - Users can create views either using the :class:`AttrView` constructor, - :meth:`FeatureStore.view`, or by incompletely indexing a feature store. - For example, the following calls all create views: - - .. code-block:: python - - store[group_name] - store[group_name].feat - store[group_name, feat] - - While the following calls all materialize those views and produce tensors - by either calling the view or fully-specifying the view: - - .. code-block:: python - - store[group_name]() - store[group_name].feat[index] - store[group_name, feat][index] - """ - def __init__(self, store: 'FeatureStore', attr: TensorAttr): - self.__dict__['_store'] = store - self.__dict__['_attr'] = attr - - # Advanced indexing ####################################################### - - def __getattr__(self, key: Any) -> Union['AttrView', FeatureTensorType]: - r"""Sets the first unset field of the backing :class:`TensorAttr` - object to the attribute. - This allows for :class:`AttrView` to be indexed by different values of - attributes, in order. - In particular, for a feature store that we want to index by - :obj:`group_name` and :obj:`attr_name`, the following code will do so: - - .. code-block:: python - - store[group, attr] - store[group].attr - store.group.attr - """ - out = copy.copy(self) - - # Find the first attribute name that is UNSET: - attr_name: Optional[str] = None - for field in out._attr.__dataclass_fields__: - if getattr(out._attr, field) == _field_status.UNSET: - attr_name = field - break - - if attr_name is None: - raise AttributeError(f"Cannot access attribute '{key}' on view " - f"'{out}' as all attributes have already " - f"been set in this view") - - setattr(out._attr, attr_name, key) - - if out._attr.is_fully_specified(): - return out._store.get_tensor(out._attr) - - return out - - def __getitem__(self, key: Any) -> Union['AttrView', FeatureTensorType]: - r"""Sets the first unset field of the backing :class:`TensorAttr` - object to the attribute via indexing. 
- This allows for :class:`AttrView` to be indexed by different values of - attributes, in order. - In particular, for a feature store that we want to index by - :obj:`group_name` and :obj:`attr_name`, the following code will do so: - - .. code-block:: python - - store[group, attr] - store[group][attr] - - """ - return self.__getattr__(key) - - # Setting attributes ###################################################### - - def __setattr__(self, key: str, value: Any): - r"""Supports attribute assignment to the backing :class:`TensorAttr` of - an :class:`AttrView`. This allows for :class:`AttrView` objects to set - their backing attribute values. In particular, the following operation - sets the :obj:`index` of an :class:`AttrView`: - - .. code-block:: python - - view = store.view(group_name) - view.index = torch.tensor([1, 2, 3]) - """ - if key not in self._attr.__dataclass_fields__: - raise ValueError(f"Attempted to set nonexistent attribute '{key}' " - f"(acceptable attributes are " - f"{self._attr.__dataclass_fields__})") - - setattr(self._attr, key, value) - - def __setitem__(self, key: str, value: Any): - r"""Supports attribute assignment to the backing :class:`TensorAttr` of - an :class:`AttrView` via indexing. This allows for :class:`AttrView` - objects to set their backing attribute values. In particular, the - following operation sets the `index` of an :class:`AttrView`: - - .. code-block:: python - - view = store.view(TensorAttr(group_name)) - view['index'] = torch.Tensor([1, 2, 3]) - """ - self.__setattr__(key, value) - - # Miscellaneous built-ins ################################################# - - def __call__(self) -> FeatureTensorType: - r"""Supports :class:`AttrView` as a callable to force retrieval from - the currently specified attributes. In particular, this passes the - current :class:`TensorAttr` object to a GET call, regardless of whether - all attributes have been specified. It returns the result of this call. - In particular, the following operation returns a tensor by performing a - GET operation on the backing feature store: - - .. code-block:: python - - store[group_name, attr_name]() - """ - # Set all UNSET values to None: - out = copy.copy(self) - out._attr.fully_specify() - return out._store.get_tensor(out._attr) - - def __copy__(self) -> 'AttrView': - out = self.__class__.__new__(self.__class__) - for key, value in self.__dict__.items(): - out.__dict__[key] = value - out.__dict__['_attr'] = copy.copy(out.__dict__['_attr']) - return out - - def __eq__(self, obj: Any) -> bool: - r"""Compares two :class:`AttrView` objects by checking equality of - their :class:`FeatureStore` references and :class:`TensorAttr` - attributes.""" - if not isinstance(obj, AttrView): - return False - return self._store == obj._store and self._attr == obj._attr - - def __repr__(self) -> str: - return (f'{self.__class__.__name__}(store={self._store}, ' - f'attr={self._attr})') - - -# TODO (manan, matthias) Ideally, we want to let `FeatureStore` inherit from -# `MutableMapping` to clearly indicate its behavior and usage to the user. -# However, having `MutableMapping` as a base class leads to strange behavior -# in combination with PyTorch and PyTorch Lightning, in particular since these -# libraries use customized logic during mini-batch for `Mapping` base classes. - - -class FeatureStore: - r"""An abstract base class to access features from a remote feature store. 
- - Args: - tensor_attr_cls (TensorAttr, optional): A user-defined - :class:`TensorAttr` class to customize the required attributes and - their ordering to uniquely identify tensor values. - (default: :obj:`None`) - """ - def __init__(self, tensor_attr_cls: Optional[Any] = None): - super().__init__() - self.__dict__['_tensor_attr_cls'] = tensor_attr_cls or TensorAttr - - # Core (CRUD) ############################################################# - - @abstractmethod - def _put_tensor(self, tensor: FeatureTensorType, attr: TensorAttr) -> bool: - r"""To be implemented by :class:`FeatureStore` subclasses.""" - pass - - def put_tensor(self, tensor: FeatureTensorType, *args, **kwargs) -> bool: - r"""Synchronously adds a :obj:`tensor` to the :class:`FeatureStore`. - Returns whether insertion was successful. - - Args: - tensor (torch.Tensor or np.ndarray): The feature tensor to be - added. - **kwargs (TensorAttr): Any relevant tensor attributes that - correspond to the feature tensor. See the :class:`TensorAttr` - documentation for required and optional attributes. - - Raises: - ValueError: If the input :class:`TensorAttr` is not fully - specified. - """ - attr = self._tensor_attr_cls.cast(*args, **kwargs) - if not attr.is_fully_specified(): - raise ValueError(f"The input TensorAttr '{attr}' is not fully " - f"specified. Please fully-specify the input by " - f"specifying all 'UNSET' fields") - return self._put_tensor(tensor, attr) - - @abstractmethod - def _get_tensor(self, attr: TensorAttr) -> Optional[FeatureTensorType]: - r"""To be implemented by :class:`FeatureStore` subclasses.""" - pass - - def get_tensor( - self, - *args, - convert_type: bool = False, - **kwargs, - ) -> FeatureTensorType: - r"""Synchronously obtains a :obj:`tensor` from the - :class:`FeatureStore`. - - Args: - **kwargs (TensorAttr): Any relevant tensor attributes that - correspond to the feature tensor. See the :class:`TensorAttr` - documentation for required and optional attributes. - convert_type (bool, optional): Whether to convert the type of the - output tensor to the type of the attribute index. - (default: :obj:`False`) - - Raises: - ValueError: If the input :class:`TensorAttr` is not fully - specified. - KeyError: If the tensor corresponding to the input - :class:`TensorAttr` was not found. - """ - attr = self._tensor_attr_cls.cast(*args, **kwargs) - if not attr.is_fully_specified(): - raise ValueError(f"The input TensorAttr '{attr}' is not fully " - f"specified. Please fully-specify the input by " - f"specifying all 'UNSET' fields.") - - tensor = self._get_tensor(attr) - if tensor is None: - raise KeyError(f"A tensor corresponding to '{attr}' was not found") - return self._to_type(attr, tensor) if convert_type else tensor - - def _multi_get_tensor( - self, - attrs: List[TensorAttr], - ) -> List[Optional[FeatureTensorType]]: - r"""To be implemented by :class:`FeatureStore` subclasses.""" - return [self._get_tensor(attr) for attr in attrs] - - def multi_get_tensor( - self, - attrs: List[TensorAttr], - convert_type: bool = False, - ) -> List[FeatureTensorType]: - r"""Synchronously obtains a list of tensors from the - :class:`FeatureStore` for each tensor associated with the attributes in - :obj:`attrs`. - - .. note:: - The default implementation simply iterates over all calls to - :meth:`get_tensor`. Implementor classes that can provide - additional, more performant functionality are recommended to - override this method.
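As a rough sketch of how the CRUD hooks above fit together, a toy in-memory `FeatureStore` might look as follows; the class name `MyFeatureStore` and its dictionary layout are illustrative assumptions, not part of the library:

```python
import torch
from torch_geometric.data import FeatureStore, TensorAttr

class MyFeatureStore(FeatureStore):
    """A toy in-memory store keyed by (group_name, attr_name) tuples."""
    def __init__(self):
        super().__init__()
        self.store = {}

    def _put_tensor(self, tensor, attr: TensorAttr) -> bool:
        self.store[(attr.group_name, attr.attr_name)] = tensor
        return True

    def _get_tensor(self, attr: TensorAttr):
        tensor = self.store.get((attr.group_name, attr.attr_name))
        if tensor is None:
            return None
        # By the time this hook runs, `attr.index` is set (possibly to `None`):
        return tensor if attr.index is None else tensor[attr.index]

    def _remove_tensor(self, attr: TensorAttr) -> bool:
        return self.store.pop((attr.group_name, attr.attr_name), None) is not None

    def _get_tensor_size(self, attr: TensorAttr):
        tensor = self.store.get((attr.group_name, attr.attr_name))
        return None if tensor is None else tuple(tensor.size())

    def get_all_tensor_attrs(self):
        return [TensorAttr(group, name) for group, name in self.store.keys()]

    def __len__(self):
        return len(self.store)

store = MyFeatureStore()
store.put_tensor(torch.randn(4, 16), group_name='paper', attr_name='x', index=None)
rows = store.get_tensor(group_name='paper', attr_name='x', index=torch.tensor([0, 2]))
```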
- - Args: - attrs (List[TensorAttr]): A list of input :class:`TensorAttr` - objects that identify the tensors to obtain. - convert_type (bool, optional): Whether to convert the type of the - output tensor to the type of the attribute index. - (default: :obj:`False`) - - Raises: - ValueError: If any input :class:`TensorAttr` is not fully - specified. - KeyError: If any of the tensors corresponding to the input - :class:`TensorAttr` was not found. - """ - attrs = [self._tensor_attr_cls.cast(attr) for attr in attrs] - bad_attrs = [attr for attr in attrs if not attr.is_fully_specified()] - if len(bad_attrs) > 0: - raise ValueError( - f"The input TensorAttr(s) '{bad_attrs}' are not fully " - f"specified. Please fully-specify them by specifying all " - f"'UNSET' fields") - - tensors = self._multi_get_tensor(attrs) - if any(v is None for v in tensors): - bad_attrs = [attrs[i] for i, v in enumerate(tensors) if v is None] - raise KeyError(f"Tensors corresponding to attributes " - f"'{bad_attrs}' were not found") - - return [ - self._to_type(attr, tensor) if convert_type else tensor - for attr, tensor in zip(attrs, tensors) - ] - - @abstractmethod - def _remove_tensor(self, attr: TensorAttr) -> bool: - r"""To be implemented by :class:`FeatureStore` subclasses.""" - pass - - def remove_tensor(self, *args, **kwargs) -> bool: - r"""Removes a tensor from the :class:`FeatureStore`. - Returns whether deletion was successful. - - Args: - **kwargs (TensorAttr): Any relevant tensor attributes that - correspond to the feature tensor. See the :class:`TensorAttr` - documentation for required and optional attributes. - - Raises: - ValueError: If the input :class:`TensorAttr` is not fully - specified. - """ - attr = self._tensor_attr_cls.cast(*args, **kwargs) - if not attr.is_fully_specified(): - raise ValueError(f"The input TensorAttr '{attr}' is not fully " - f"specified. Please fully-specify the input by " - f"specifying all 'UNSET' fields.") - return self._remove_tensor(attr) - - def update_tensor(self, tensor: FeatureTensorType, *args, - **kwargs) -> bool: - r"""Updates a :obj:`tensor` in the :class:`FeatureStore` with a new - value. Returns whether the update was successful. - - .. note:: - Implementor classes can choose to define more efficient update - methods; the default performs a removal and insertion. - - Args: - tensor (torch.Tensor or np.ndarray): The feature tensor to be - updated. - **kwargs (TensorAttr): Any relevant tensor attributes that - correspond to the feature tensor. See the :class:`TensorAttr` - documentation for required and optional attributes. - """ - attr = self._tensor_attr_cls.cast(*args, **kwargs) - self.remove_tensor(attr) - return self.put_tensor(tensor, attr) - - # Additional methods ###################################################### - - @abstractmethod - def _get_tensor_size(self, attr: TensorAttr) -> Optional[Tuple[int, ...]]: - pass - - def get_tensor_size(self, *args, **kwargs) -> Optional[Tuple[int, ...]]: - r"""Obtains the size of a tensor given its :class:`TensorAttr`, or - :obj:`None` if the tensor does not exist.""" - attr = self._tensor_attr_cls.cast(*args, **kwargs) - if not attr.is_set('index'): - attr.index = None - return self._get_tensor_size(attr) - - @abstractmethod - def get_all_tensor_attrs(self) -> List[TensorAttr]: - r"""Obtains all tensor attributes stored in this :class:`FeatureStore`.
- """ - pass - - # `AttrView` methods ###################################################### - - def view(self, *args, **kwargs) -> AttrView: - r"""Returns a view of the :class:`FeatureStore` given a not yet - fully-specified :class:`TensorAttr`.""" - attr = self._tensor_attr_cls.cast(*args, **kwargs) - return AttrView(self, attr) - - # Helper functions ######################################################## - - @staticmethod - def _to_type( - attr: TensorAttr, - tensor: FeatureTensorType, - ) -> FeatureTensorType: - if (isinstance(attr.index, torch.Tensor) - and isinstance(tensor, np.ndarray)): - return torch.from_numpy(tensor) - if (isinstance(attr.index, np.ndarray) - and isinstance(tensor, torch.Tensor)): - return tensor.detach().cpu().numpy() - return tensor - - # Python built-ins ######################################################## - - def __setitem__(self, key: TensorAttr, value: FeatureTensorType): - r"""Supports :obj:`store[tensor_attr] = tensor`.""" - # CastMixin will handle the case of key being a tuple or TensorAttr - # object: - key = self._tensor_attr_cls.cast(key) - # We need to fully-specify the key for __setitem__ as it does not make - # sense to work with a view here: - key.fully_specify() - self.put_tensor(value, key) - - def __getitem__(self, key: TensorAttr) -> Any: - r"""Supports pythonic indexing into the :class:`FeatureStore`. - In particular, the following rules are followed for indexing: - - * A fully-specified :obj:`key` will produce a tensor output. - - * A partially-specified :obj:`key` will produce an :class:`AttrView` - output, which is a view on the :class:`FeatureStore`. If a view is - called, it will produce a tensor output from the corresponding - (partially specified) attributes. - """ - # CastMixin will handle the case of key being a tuple or TensorAttr: - attr = self._tensor_attr_cls.cast(key) - if attr.is_fully_specified(): - return self.get_tensor(attr) - # If the view is not fully-specified, return a :class:`AttrView`: - return self.view(attr) - - def __delitem__(self, key: TensorAttr): - r"""Supports :obj:`del store[tensor_attr]`.""" - # CastMixin will handle the case of key being a tuple or TensorAttr - # object: - key = self._tensor_attr_cls.cast(key) - key.fully_specify() - self.remove_tensor(key) - - def __iter__(self): - raise NotImplementedError - - def __eq__(self, obj: object) -> bool: - return id(self) == id(obj) - - @abstractmethod - def __len__(self): - pass - - def __repr__(self) -> str: - return f'{self.__class__.__name__}()' diff --git a/pytorch_geometric-2.3.1/torch_geometric/data/in_memory_dataset.py b/pytorch_geometric-2.3.1/torch_geometric/data/in_memory_dataset.py deleted file mode 100644 index 1e1b3ee..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/data/in_memory_dataset.py +++ /dev/null @@ -1,188 +0,0 @@ -import copy -import warnings -from abc import ABC -from collections.abc import Mapping, Sequence -from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union - -from torch import Tensor - -from torch_geometric.data import Batch, Data -from torch_geometric.data.collate import collate -from torch_geometric.data.dataset import Dataset, IndexType -from torch_geometric.data.separate import separate - - -class InMemoryDataset(Dataset, ABC): - r"""Dataset base class for creating graph datasets which easily fit - into CPU memory. - Inherits from :class:`torch_geometric.data.Dataset`. - See `here `__ for the accompanying - tutorial. 
- - Args: - root (str, optional): Root directory where the dataset should be saved. - (default: :obj:`None`) - transform (callable, optional): A function/transform that takes in an - :obj:`torch_geometric.data.Data` object and returns a transformed - version. The data object will be transformed before every access. - (default: :obj:`None`) - pre_transform (callable, optional): A function/transform that takes in - an :obj:`torch_geometric.data.Data` object and returns a - transformed version. The data object will be transformed before - being saved to disk. (default: :obj:`None`) - pre_filter (callable, optional): A function that takes in an - :obj:`torch_geometric.data.Data` object and returns a boolean - value, indicating whether the data object should be included in the - final dataset. (default: :obj:`None`) - log (bool, optional): Whether to print any console output while - downloading and processing the dataset. (default: :obj:`True`) - """ - @property - def raw_file_names(self) -> Union[str, List[str], Tuple]: - raise NotImplementedError - - @property - def processed_file_names(self) -> Union[str, List[str], Tuple]: - raise NotImplementedError - - def __init__( - self, - root: Optional[str] = None, - transform: Optional[Callable] = None, - pre_transform: Optional[Callable] = None, - pre_filter: Optional[Callable] = None, - log: bool = True, - ): - super().__init__(root, transform, pre_transform, pre_filter, log) - self._data = None - self.slices = None - self._data_list: Optional[List[Data]] = None - - @property - def num_classes(self) -> int: - if self.transform is None: - return self._infer_num_classes(self._data.y) - return super().num_classes - - def len(self) -> int: - if self.slices is None: - return 1 - for _, value in nested_iter(self.slices): - return len(value) - 1 - return 0 - - def get(self, idx: int) -> Data: - if self.len() == 1: - return copy.copy(self._data) - - if not hasattr(self, '_data_list') or self._data_list is None: - self._data_list = self.len() * [None] - elif self._data_list[idx] is not None: - return copy.copy(self._data_list[idx]) - - data = separate( - cls=self._data.__class__, - batch=self._data, - idx=idx, - slice_dict=self.slices, - decrement=False, - ) - - self._data_list[idx] = copy.copy(data) - - return data - - @staticmethod - def collate( - data_list: List[Data]) -> Tuple[Data, Optional[Dict[str, Tensor]]]: - r"""Collates a Python list of :obj:`torch_geometric.data.Data` objects - to the internal storage format of - :class:`~torch_geometric.data.InMemoryDataset`.""" - if len(data_list) == 1: - return data_list[0], None - - data, slices, _ = collate( - data_list[0].__class__, - data_list=data_list, - increment=False, - add_batch=False, - ) - - return data, slices - - def copy(self, idx: Optional[IndexType] = None) -> 'InMemoryDataset': - r"""Performs a deep-copy of the dataset. If :obj:`idx` is not given, - will clone the full dataset. Otherwise, will only clone a subset of the - dataset from indices :obj:`idx`. - Indices can be slices, lists, tuples, and a :obj:`torch.Tensor` or - :obj:`np.ndarray` of type long or bool.
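A minimal `InMemoryDataset` subclass, sketched against the 2.3-era API shown here; `MyDataset` and its toy contents are invented for illustration:

```python
import torch
from torch_geometric.data import Data, InMemoryDataset

class MyDataset(InMemoryDataset):
    def __init__(self, root, transform=None, pre_transform=None):
        super().__init__(root, transform, pre_transform)
        self.data, self.slices = torch.load(self.processed_paths[0])

    @property
    def raw_file_names(self):
        return []  # nothing to download for this toy dataset

    @property
    def processed_file_names(self):
        return ['data.pt']

    def process(self):
        # Ten random graphs with 4 nodes and 2 edges each:
        data_list = [
            Data(x=torch.randn(4, 8),
                 edge_index=torch.tensor([[0, 1], [1, 2]]))
            for _ in range(10)
        ]
        if self.pre_transform is not None:
            data_list = [self.pre_transform(d) for d in data_list]
        # `collate()` packs the list into one big `Data` object plus slices:
        torch.save(self.collate(data_list), self.processed_paths[0])

dataset = MyDataset(root='/tmp/my_dataset')  # len(dataset) == 10
```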
- """ - if idx is None: - data_list = [self.get(i) for i in self.indices()] - else: - data_list = [self.get(i) for i in self.index_select(idx).indices()] - - dataset = copy.copy(self) - dataset._indices = None - dataset._data_list = None - dataset.data, dataset.slices = self.collate(data_list) - return dataset - - @property - def data(self) -> Any: - msg1 = ("It is not recommended to directly access the internal " - "storage format `data` of an 'InMemoryDataset'.") - msg2 = ("The given 'InMemoryDataset' only references a subset of " - "examples of the full dataset, but 'data' will contain " - "information of the full dataset.") - msg3 = ("The data of the dataset is already cached, so any " - "modifications to `data` will not be reflected when accessing " - "its elements. Clearing the cache now by removing all " - "elements in `dataset._data_list`.") - msg4 = ("If you are absolutely certain what you are doing, access the " - "internal storage via `InMemoryDataset._data` instead to " - "suppress this warning. Alternatively, you can access stacked " - "individual attributes of every graph via " - "`dataset.{attr_name}`.") - - msg = msg1 - if self._indices is not None: - msg += f' {msg2}' - if self._data_list is not None: - msg += f' {msg3}' - self._data_list = None - msg += f' {msg4}' - - warnings.warn(msg) - - return self._data - - @data.setter - def data(self, value: Any): - self._data = value - self._data_list = None - - def __getattr__(self, key: str) -> Any: - data = self.__dict__.get('_data') - if isinstance(data, Data) and key in data: - if self._indices is None and data.__inc__(key, data[key]) == 0: - return data[key] - else: - data_list = [self.get(i) for i in self.indices()] - return Batch.from_data_list(data_list)[key] - - raise AttributeError(f"'{self.__class__.__name__}' object has no " - f"attribute '{key}'") - - -def nested_iter(node: Union[Mapping, Sequence]) -> Iterable: - if isinstance(node, Mapping): - for key, value in node.items(): - for inner_key, inner_value in nested_iter(value): - yield inner_key, inner_value - elif isinstance(node, Sequence): - for i, inner_value in enumerate(node): - yield i, inner_value - else: - yield None, node diff --git a/pytorch_geometric-2.3.1/torch_geometric/data/storage.py b/pytorch_geometric-2.3.1/torch_geometric/data/storage.py deleted file mode 100644 index 41a68c6..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/data/storage.py +++ /dev/null @@ -1,695 +0,0 @@ -import copy -import warnings -import weakref -from collections import defaultdict, namedtuple -from collections.abc import Mapping, MutableMapping, Sequence -from enum import Enum -from typing import ( - Any, - Callable, - Dict, - Iterable, - List, - NamedTuple, - Optional, - Set, - Tuple, - Union, -) - -import numpy as np -import torch -from torch import Tensor - -from torch_geometric.data.view import ItemsView, KeysView, ValuesView -from torch_geometric.typing import EdgeType, NodeType, SparseTensor -from torch_geometric.utils import ( - coalesce, - contains_isolated_nodes, - is_torch_sparse_tensor, - is_undirected, -) - -N_KEYS = {'x', 'feat', 'pos', 'batch', 'node_type', 'n_id'} -E_KEYS = {'edge_index', 'edge_weight', 'edge_attr', 'edge_type', 'e_id'} - - -class AttrType(Enum): - NODE = 'NODE' - EDGE = 'EDGE' - OTHER = 'OTHER' - - -class BaseStorage(MutableMapping): - # This class wraps a Python dictionary and extends it as follows: - # 1. It allows attribute assignments, e.g.: - # `storage.x = ...` in addition to `storage['x'] = ...` - # 2. 
It allows private attributes that are not exposed to the user, e.g.: - # `storage._{key} = ...` and accessible via `storage._{key}` - # 3. It holds an (optional) weak reference to its parent object, e.g.: - # `storage._parent = weakref.ref(parent)` - # 4. It allows iterating over only a subset of keys, e.g.: - # `storage.values('x', 'y')` or `storage.items('x', 'y') - # 5. It adds additional PyTorch Tensor functionality, e.g.: - # `storage.cpu()`, `storage.cuda()` or `storage.share_memory_()`. - def __init__(self, _mapping: Optional[Dict[str, Any]] = None, **kwargs): - super().__init__() - self._mapping = {} - for key, value in (_mapping or {}).items(): - setattr(self, key, value) - for key, value in kwargs.items(): - setattr(self, key, value) - - @property - def _key(self) -> Any: - return None - - def _pop_cache(self, key: str): - for cache in getattr(self, '_cached_attr', {}).values(): - cache.discard(key) - - def __len__(self) -> int: - return len(self._mapping) - - def __getattr__(self, key: str) -> Any: - if key == '_mapping': - self._mapping = {} - return self._mapping - try: - return self[key] - except KeyError: - raise AttributeError( - f"'{self.__class__.__name__}' object has no attribute '{key}'") - - def __setattr__(self, key: str, value: Any): - propobj = getattr(self.__class__, key, None) - if propobj is not None and getattr(propobj, 'fset', None) is not None: - propobj.fset(self, value) - elif key == '_parent': - self.__dict__[key] = weakref.ref(value) - elif key[:1] == '_': - self.__dict__[key] = value - else: - self._pop_cache(key) - self[key] = value - - def __delattr__(self, key: str): - if key[:1] == '_': - del self.__dict__[key] - else: - self._pop_cache(key) - del self[key] - - def __getitem__(self, key: str) -> Any: - return self._mapping[key] - - def __setitem__(self, key: str, value: Any): - self._pop_cache(key) - if value is None and key in self._mapping: - del self._mapping[key] - elif value is not None: - self._mapping[key] = value - - def __delitem__(self, key: str): - if key in self._mapping: - self._pop_cache(key) - del self._mapping[key] - - def __iter__(self) -> Iterable: - return iter(self._mapping) - - def __copy__(self): - out = self.__class__.__new__(self.__class__) - for key, value in self.__dict__.items(): - out.__dict__[key] = value - out._mapping = copy.copy(out._mapping) - return out - - def __deepcopy__(self, memo): - out = self.__class__.__new__(self.__class__) - for key, value in self.__dict__.items(): - out.__dict__[key] = value - out._mapping = copy.deepcopy(out._mapping, memo) - return out - - def __getstate__(self) -> Dict[str, Any]: - out = self.__dict__.copy() - - _parent = out.get('_parent', None) - if _parent is not None: - out['_parent'] = _parent() - - return out - - def __setstate__(self, mapping: Dict[str, Any]): - for key, value in mapping.items(): - self.__dict__[key] = value - - _parent = self.__dict__.get('_parent', None) - if _parent is not None: - self.__dict__['_parent'] = weakref.ref(_parent) - - def __repr__(self) -> str: - return repr(self._mapping) - - # Allow iterating over subsets ############################################ - - # In contrast to standard `keys()`, `values()` and `items()` functions of - # Python dictionaries, we allow to only iterate over a subset of items - # denoted by a list of keys `args`. - # This is especially useful for adding PyTorch Tensor functionality to the - # storage object, e.g., in case we only want to transfer a subset of keys - # to the GPU (i.e. 
the ones that are relevant to the deep learning model). - - def keys(self, *args: List[str]) -> KeysView: - return KeysView(self._mapping, *args) - - def values(self, *args: List[str]) -> ValuesView: - return ValuesView(self._mapping, *args) - - def items(self, *args: List[str]) -> ItemsView: - return ItemsView(self._mapping, *args) - - def apply_(self, func: Callable, *args: List[str]): - r"""Applies the in-place function :obj:`func`, either to all attributes - or only the ones given in :obj:`*args`.""" - for value in self.values(*args): - recursive_apply_(value, func) - return self - - def apply(self, func: Callable, *args: List[str]): - r"""Applies the function :obj:`func`, either to all attributes or only - the ones given in :obj:`*args`.""" - for key, value in self.items(*args): - self[key] = recursive_apply(value, func) - return self - - # Additional functionality ################################################ - - def get(self, key: str, value: Optional[Any] = None) -> Any: - return self._mapping.get(key, value) - - def to_dict(self) -> Dict[str, Any]: - r"""Returns a dictionary of stored key/value pairs.""" - return copy.copy(self._mapping) - - def to_namedtuple(self) -> NamedTuple: - r"""Returns a :obj:`NamedTuple` of stored key/value pairs.""" - field_names = list(self.keys()) - typename = f'{self.__class__.__name__}Tuple' - StorageTuple = namedtuple(typename, field_names) - return StorageTuple(*[self[key] for key in field_names]) - - def clone(self, *args: List[str]): - r"""Performs a deep-copy of the object.""" - return copy.deepcopy(self) - - def contiguous(self, *args: List[str]): - r"""Ensures a contiguous memory layout, either for all attributes or - only the ones given in :obj:`*args`.""" - return self.apply(lambda x: x.contiguous(), *args) - - def to(self, device: Union[int, str], *args: List[str], - non_blocking: bool = False): - r"""Performs tensor dtype and/or device conversion, either for all - attributes or only the ones given in :obj:`*args`.""" - return self.apply( - lambda x: x.to(device=device, non_blocking=non_blocking), *args) - - def cpu(self, *args: List[str]): - r"""Copies attributes to CPU memory, either for all attributes or only - the ones given in :obj:`*args`.""" - return self.apply(lambda x: x.cpu(), *args) - - def cuda(self, device: Optional[Union[int, str]] = None, *args: List[str], - non_blocking: bool = False): # pragma: no cover - r"""Copies attributes to CUDA memory, either for all attributes or only - the ones given in :obj:`*args`.""" - return self.apply(lambda x: x.cuda(device, non_blocking=non_blocking), - *args) - - def pin_memory(self, *args: List[str]): # pragma: no cover - r"""Copies attributes to pinned memory, either for all attributes or - only the ones given in :obj:`*args`.""" - return self.apply(lambda x: x.pin_memory(), *args) - - def share_memory_(self, *args: List[str]): - r"""Moves attributes to shared memory, either for all attributes or - only the ones given in :obj:`*args`.""" - return self.apply(lambda x: x.share_memory_(), *args) - - def detach_(self, *args: List[str]): - r"""Detaches attributes from the computation graph, either for all - attributes or only the ones given in :obj:`*args`.""" - return self.apply(lambda x: x.detach_(), *args) - - def detach(self, *args: List[str]): - r"""Detaches attributes from the computation graph by creating a new - tensor, either for all attributes or only the ones given in - :obj:`*args`.""" - return self.apply(lambda x: x.detach(), *args) - - def requires_grad_(self, *args: 
List[str], requires_grad: bool = True): - r"""Tracks gradient computation, either for all attributes or only the - ones given in :obj:`*args`.""" - return self.apply( - lambda x: x.requires_grad_(requires_grad=requires_grad), *args) - - def record_stream(self, stream: torch.cuda.Stream, *args: List[str]): - r"""Ensures that the tensor memory is not reused for another tensor - until all current work queued on :obj:`stream` has been completed, - either for all attributes or only the ones given in :obj:`*args`.""" - return self.apply_(lambda x: x.record_stream(stream), *args) - - -class NodeStorage(BaseStorage): - @property - def _key(self) -> NodeType: - key = self.__dict__.get('_key', None) - if key is None or not isinstance(key, str): - raise ValueError("'_key' does not denote a valid node type") - return key - - @property - def can_infer_num_nodes(self): - keys = set(self.keys()) - num_node_keys = { - 'num_nodes', 'x', 'pos', 'batch', 'adj', 'adj_t', 'edge_index', - 'face' - } - if len(keys & num_node_keys) > 0: - return True - elif len([key for key in keys if 'node' in key]) > 0: - return True - else: - return False - - @property - def num_nodes(self) -> Optional[int]: - # We sequentially access attributes that reveal the number of nodes. - if 'num_nodes' in self: - return self['num_nodes'] - for key, value in self.items(): - if isinstance(value, (Tensor, np.ndarray)) and key in N_KEYS: - cat_dim = self._parent().__cat_dim__(key, value, self) - return value.shape[cat_dim] - for key, value in self.items(): - if isinstance(value, (Tensor, np.ndarray)) and 'node' in key: - cat_dim = self._parent().__cat_dim__(key, value, self) - return value.shape[cat_dim] - if 'adj' in self and isinstance(self.adj, SparseTensor): - return self.adj.size(0) - if 'adj_t' in self and isinstance(self.adj_t, SparseTensor): - return self.adj_t.size(1) - warnings.warn( - f"Unable to accurately infer 'num_nodes' from the attribute set " - f"'{set(self.keys())}'. 
Please explicitly set 'num_nodes' as an " - f"attribute of " + - ("'data'" if self._key is None else f"'data[{self._key}]'") + - " to suppress this warning") - if 'edge_index' in self and isinstance(self.edge_index, Tensor): - if self.edge_index.numel() > 0: - return int(self.edge_index.max()) + 1 - else: - return 0 - if 'face' in self and isinstance(self.face, Tensor): - if self.face.numel() > 0: - return int(self.face.max()) + 1 - else: - return 0 - return None - - @property - def num_node_features(self) -> int: - if 'x' in self and isinstance(self.x, (Tensor, np.ndarray)): - return 1 if self.x.ndim == 1 else self.x.shape[-1] - if 'x' in self and isinstance(self.x, SparseTensor): - return 1 if self.x.dim() == 1 else self.x.size(-1) - return 0 - - @property - def num_features(self) -> int: - return self.num_node_features - - def is_node_attr(self, key: str) -> bool: - if '_cached_attr' not in self.__dict__: - self._cached_attr: Dict[AttrType, Set[str]] = defaultdict(set) - - if key in self._cached_attr[AttrType.NODE]: - return True - if key in self._cached_attr[AttrType.OTHER]: - return False - - value = self[key] - - if isinstance(value, (list, tuple)) and len(value) == self.num_nodes: - self._cached_attr[AttrType.NODE].add(key) - return True - - if not isinstance(value, (Tensor, np.ndarray)): - self._cached_attr[AttrType.OTHER].add(key) - return False - - if value.ndim == 0: - self._cached_attr[AttrType.OTHER].add(key) - return False - - cat_dim = self._parent().__cat_dim__(key, value, self) - if value.shape[cat_dim] != self.num_nodes: - self._cached_attr[AttrType.OTHER].add(key) - return False - - self._cached_attr[AttrType.NODE].add(key) - return True - - def is_edge_attr(self, key: str) -> bool: - return False - - def node_attrs(self) -> List[str]: - return [key for key in self.keys() if self.is_node_attr(key)] - - -class EdgeStorage(BaseStorage): - r"""We support multiple ways to store edge connectivity in a - :class:`EdgeStorage` object: - - * :obj:`edge_index`: A :class:`torch.LongTensor` holding edge indices in - COO format with shape :obj:`[2, num_edges]` (the default format) - - * :obj:`adj`: A :class:`torch_sparse.SparseTensor` holding edge indices in - a sparse format, supporting both COO and CSR format. - - * :obj:`adj_t`: A **transposed** :class:`torch_sparse.SparseTensor` holding - edge indices in a sparse format, supporting both COO and CSR format. - This is the most efficient one for graph-based deep learning models as - indices are sorted based on target nodes. - """ - @property - def _key(self) -> EdgeType: - key = self.__dict__.get('_key', None) - if key is None or not isinstance(key, tuple) or not len(key) == 3: - raise ValueError("'_key' does not denote a valid edge type") - return key - - @property - def edge_index(self) -> Tensor: - if 'edge_index' in self: - return self['edge_index'] - if 'adj' in self and isinstance(self.adj, SparseTensor): - return torch.stack(self.adj.coo()[:2], dim=0) - if 'adj_t' in self and isinstance(self.adj_t, SparseTensor): - return torch.stack(self.adj_t.coo()[:2][::-1], dim=0) - raise AttributeError( - f"'{self.__class__.__name__}' object has no attribute " - f"'edge_index', 'adj' or 'adj_t'") - - @property - def num_edges(self) -> int: - # We sequentially access attributes that reveal the number of edges. 
- if 'num_edges' in self: - return self['num_edges'] - for key, value in self.items(): - if isinstance(value, (Tensor, np.ndarray)) and key in E_KEYS: - cat_dim = self._parent().__cat_dim__(key, value, self) - return value.shape[cat_dim] - for key, value in self.items(): - if isinstance(value, (Tensor, np.ndarray)) and 'edge' in key: - cat_dim = self._parent().__cat_dim__(key, value, self) - return value.shape[cat_dim] - for value in self.values('adj', 'adj_t'): - if isinstance(value, SparseTensor): - return value.nnz() - elif is_torch_sparse_tensor(value): - return value._nnz() - return 0 - - @property - def num_edge_features(self) -> int: - if ('edge_attr' in self and isinstance(self.edge_attr, - (Tensor, np.ndarray))): - return 1 if self.edge_attr.ndim == 1 else self.edge_attr.shape[-1] - return 0 - - @property - def num_features(self) -> int: - return self.num_edge_features - - def size( - self, dim: Optional[int] = None - ) -> Union[Tuple[Optional[int], Optional[int]], Optional[int]]: - - if self._key is None: - raise NameError("Unable to infer 'size' without explicit " - "'_key' assignment") - - size = (self._parent()[self._key[0]].num_nodes, - self._parent()[self._key[-1]].num_nodes) - - return size if dim is None else size[dim] - - def is_node_attr(self, key: str) -> bool: - return False - - def is_edge_attr(self, key: str) -> bool: - if '_cached_attr' not in self.__dict__: - self._cached_attr: Dict[AttrType, Set[str]] = defaultdict(set) - - if key in self._cached_attr[AttrType.EDGE]: - return True - if key in self._cached_attr[AttrType.OTHER]: - return False - - value = self[key] - - if isinstance(value, (list, tuple)) and len(value) == self.num_edges: - self._cached_attr[AttrType.EDGE].add(key) - return True - - if not isinstance(value, (Tensor, np.ndarray)): - self._cached_attr[AttrType.OTHER].add(key) - return False - - if value.ndim == 0: - self._cached_attr[AttrType.OTHER].add(key) - return False - - cat_dim = self._parent().__cat_dim__(key, value, self) - if value.shape[cat_dim] != self.num_edges: - self._cached_attr[AttrType.OTHER].add(key) - return False - - self._cached_attr[AttrType.EDGE].add(key) - return True - - def edge_attrs(self) -> List[str]: - return [key for key in self.keys() if self.is_edge_attr(key)] - - def is_coalesced(self) -> bool: - for value in self.values('adj', 'adj_t'): - return value.is_coalesced() - - if 'edge_index' in self: - new_edge_index = coalesce( - self.edge_index, - num_nodes=max(self.size(0), self.size(1)), - ) - return (self.edge_index.numel() == new_edge_index.numel() - and torch.equal(self.edge_index, new_edge_index)) - - return True - - def coalesce(self, reduce: str = 'sum'): - for key, value in self.items('adj', 'adj_t'): - self[key] = value.coalesce(reduce) - - if 'edge_index' in self: - if 'edge_attr' in self: - self.edge_index, self.edge_attr = coalesce( - self.edge_index, - self.edge_attr, - num_nodes=max(self.size(0), self.size(1)), - ) - else: - self.edge_index = coalesce( - self.edge_index, - num_nodes=max(self.size(0), self.size(1)), - ) - - return self - - def has_isolated_nodes(self) -> bool: - edge_index, num_nodes = self.edge_index, self.size(1) - if num_nodes is None: - raise NameError("Unable to infer 'num_nodes'") - if self.is_bipartite(): - return torch.unique(edge_index[1]).numel() < num_nodes - else: - return contains_isolated_nodes(edge_index, num_nodes) - - def has_self_loops(self) -> bool: - if self.is_bipartite(): - return False - edge_index = self.edge_index - return int((edge_index[0] == edge_index[1]).sum()) 
> 0 - - def is_undirected(self) -> bool: - if self.is_bipartite(): - return False - - for value in self.values('adj', 'adj_t'): - return value.is_symmetric() - - edge_index = self.edge_index - edge_attr = self.edge_attr if 'edge_attr' in self else None - return is_undirected(edge_index, edge_attr, num_nodes=self.size(0)) - - def is_directed(self) -> bool: - return not self.is_undirected() - - def is_bipartite(self) -> bool: - return self._key is not None and self._key[0] != self._key[-1] - - -class GlobalStorage(NodeStorage, EdgeStorage): - @property - def _key(self) -> Any: - return None - - @property - def num_features(self) -> int: - return self.num_node_features - - def size( - self, dim: Optional[int] = None - ) -> Union[Tuple[Optional[int], Optional[int]], Optional[int]]: - size = (self.num_nodes, self.num_nodes) - return size if dim is None else size[dim] - - def is_node_attr(self, key: str) -> bool: - if '_cached_attr' not in self.__dict__: - self._cached_attr: Dict[AttrType, Set[str]] = defaultdict(set) - - if key in self._cached_attr[AttrType.NODE]: - return True - if key in self._cached_attr[AttrType.EDGE]: - return False - if key in self._cached_attr[AttrType.OTHER]: - return False - - value = self[key] - - if isinstance(value, (list, tuple)) and len(value) == self.num_nodes: - self._cached_attr[AttrType.NODE].add(key) - return True - - if not isinstance(value, (Tensor, np.ndarray)): - self._cached_attr[AttrType.OTHER].add(key) - return False - - if value.ndim == 0: - self._cached_attr[AttrType.OTHER].add(key) - return False - - cat_dim = self._parent().__cat_dim__(key, value, self) - num_nodes, num_edges = self.num_nodes, self.num_edges - - if value.shape[cat_dim] != num_nodes: - if value.shape[cat_dim] == num_edges: - self._cached_attr[AttrType.EDGE].add(key) - else: - self._cached_attr[AttrType.OTHER].add(key) - return False - - if num_nodes != num_edges: - self._cached_attr[AttrType.NODE].add(key) - return True - - if 'edge' not in key: - self._cached_attr[AttrType.NODE].add(key) - return True - else: - self._cached_attr[AttrType.EDGE].add(key) - return False - - def is_edge_attr(self, key: str) -> bool: - if '_cached_attr' not in self.__dict__: - self._cached_attr: Dict[AttrType, Set[str]] = defaultdict(set) - - if key in self._cached_attr[AttrType.EDGE]: - return True - if key in self._cached_attr[AttrType.NODE]: - return False - if key in self._cached_attr[AttrType.OTHER]: - return False - - value = self[key] - - if isinstance(value, (list, tuple)) and len(value) == self.num_edges: - self._cached_attr[AttrType.EDGE].add(key) - return True - - if not isinstance(value, (Tensor, np.ndarray)): - self._cached_attr[AttrType.OTHER].add(key) - return False - - if value.ndim == 0: - self._cached_attr[AttrType.OTHER].add(key) - return False - - cat_dim = self._parent().__cat_dim__(key, value, self) - num_nodes, num_edges = self.num_nodes, self.num_edges - - if value.shape[cat_dim] != num_edges: - if value.shape[cat_dim] == num_nodes: - self._cached_attr[AttrType.NODE].add(key) - else: - self._cached_attr[AttrType.OTHER].add(key) - return False - - if num_edges != num_nodes: - self._cached_attr[AttrType.EDGE].add(key) - return True - - if 'edge' in key: - self._cached_attr[AttrType.EDGE].add(key) - return True - else: - self._cached_attr[AttrType.NODE].add(key) - return False - - -def recursive_apply_(data: Any, func: Callable): - if isinstance(data, Tensor): - func(data) - elif isinstance(data, tuple) and hasattr(data, '_fields'): # namedtuple - for value in data: - 
recursive_apply_(value, func) - elif isinstance(data, Sequence) and not isinstance(data, str): - for value in data: - recursive_apply_(value, func) - elif isinstance(data, Mapping): - for value in data.values(): - recursive_apply_(value, func) - else: - try: - func(data) - except: # noqa - pass - - -def recursive_apply(data: Any, func: Callable) -> Any: - if isinstance(data, Tensor): - return func(data) - elif isinstance(data, torch.nn.utils.rnn.PackedSequence): - return func(data) - elif isinstance(data, tuple) and hasattr(data, '_fields'): # namedtuple - return type(data)(*(recursive_apply(d, func) for d in data)) - elif isinstance(data, Sequence) and not isinstance(data, str): - return [recursive_apply(d, func) for d in data] - elif isinstance(data, Mapping): - return {key: recursive_apply(data[key], func) for key in data} - else: - try: - return func(data) - except: # noqa - return data diff --git a/pytorch_geometric-2.3.1/torch_geometric/data/summary.py b/pytorch_geometric-2.3.1/torch_geometric/data/summary.py deleted file mode 100644 index c8570a3..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/data/summary.py +++ /dev/null @@ -1,92 +0,0 @@ -from dataclasses import dataclass -from typing import List, Optional, Union - -import torch -from tqdm import tqdm - -from torch_geometric.data import Dataset - - -@dataclass -class Stats: - mean: float - std: float - min: float - quantile25: float - median: float - quantile75: float - max: float - - @classmethod - def from_data(cls, data: Union[List[int], List[float], torch.Tensor]): - if not isinstance(data, torch.Tensor): - data = torch.tensor(data) - data = data.to(torch.float) - - return cls( - mean=data.mean().item(), - std=data.std().item(), - min=data.min().item(), - quantile25=data.quantile(0.25).item(), - median=data.median().item(), - quantile75=data.quantile(0.75).item(), - max=data.max().item(), - ) - - -@dataclass(repr=False) -class Summary: - name: str - num_graphs: int - num_nodes: Stats - num_edges: Stats - - @classmethod - def from_dataset( - cls, - dataset: Dataset, - progress_bar: Optional[bool] = None, - ): - r"""Creates a summary of a :class:`~torch_geometric.data.Dataset` - object. - - Args: - dataset (Dataset): The dataset. - progress_bar (bool, optional): If set to :obj:`True`, will show a - progress bar during stats computation. If set to :obj:`None`, - will automatically decide whether to show a progress bar based - on dataset size.
(default: :obj:`None`) - """ - name = dataset.__class__.__name__ - - if progress_bar is None: - progress_bar = len(dataset) >= 10000 - - if progress_bar: - dataset = tqdm(dataset) - - num_nodes_list, num_edges_list = [], [] - for data in dataset: - num_nodes_list.append(data.num_nodes) - num_edges_list.append(data.num_edges) - - return cls( - name=name, - num_graphs=len(dataset), - num_nodes=Stats.from_data(num_nodes_list), - num_edges=Stats.from_data(num_edges_list), - ) - - def __repr__(self) -> str: - from tabulate import tabulate - - prefix = f'{self.name} (#graphs={self.num_graphs}):\n' - - content = [['', '#nodes', '#edges']] - stats = [self.num_nodes, self.num_edges] - for field in Stats.__dataclass_fields__: - row = [field] + [f'{getattr(s, field):.1f}' for s in stats] - content.append(row) - body = tabulate(content, headers='firstrow', tablefmt='psql') - - return prefix + body diff --git a/pytorch_geometric-2.3.1/torch_geometric/datasets/__init__.py b/pytorch_geometric-2.3.1/torch_geometric/datasets/__init__.py deleted file mode 100644 index 2bdddc5..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/datasets/__init__.py +++ /dev/null @@ -1,184 +0,0 @@ -from .karate import KarateClub -from .tu_dataset import TUDataset -from .gnn_benchmark_dataset import GNNBenchmarkDataset -from .planetoid import Planetoid -from .fake import FakeDataset, FakeHeteroDataset -from .nell import NELL -from .citation_full import CitationFull, CoraFull -from .coauthor import Coauthor -from .amazon import Amazon -from .ppi import PPI -from .reddit import Reddit -from .reddit2 import Reddit2 -from .flickr import Flickr -from .yelp import Yelp -from .amazon_products import AmazonProducts -from .qm7 import QM7b -from .qm9 import QM9 -from .md17 import MD17 -from .zinc import ZINC -from .aqsol import AQSOL -from .molecule_net import MoleculeNet -from .entities import Entities -from .rel_link_pred_dataset import RelLinkPredDataset -from .ged_dataset import GEDDataset -from .attributed_graph_dataset import AttributedGraphDataset -from .mnist_superpixels import MNISTSuperpixels -from .faust import FAUST -from .dynamic_faust import DynamicFAUST -from .shapenet import ShapeNet -from .modelnet import ModelNet -from .coma import CoMA -from .shrec2016 import SHREC2016 -from .tosca import TOSCA -from .pcpnet_dataset import PCPNetDataset -from .s3dis import S3DIS -from .geometry import GeometricShapes -from .bitcoin_otc import BitcoinOTC -from .icews import ICEWS18 -from .gdelt import GDELT -from .willow_object_class import WILLOWObjectClass -from .dbp15k import DBP15K -from .pascal import PascalVOCKeypoints -from .pascal_pf import PascalPF -from .snap_dataset import SNAPDataset -from .suite_sparse import SuiteSparseMatrixCollection -from .aminer import AMiner -from .word_net import WordNet18, WordNet18RR -from .freebase import FB15k_237 -from .wikics import WikiCS -from .webkb import WebKB -from .wikipedia_network import WikipediaNetwork -from .heterophilous_graph_dataset import HeterophilousGraphDataset -from .actor import Actor -from .ogb_mag import OGB_MAG -from .dblp import DBLP -from .movie_lens import MovieLens -from .imdb import IMDB -from .last_fm import LastFM -from .hgb_dataset import HGBDataset -from .jodie import JODIEDataset -from .mixhop_synthetic_dataset import MixHopSyntheticDataset -from .upfd import UPFD -from .github import GitHub -from .facebook import FacebookPagePage -from .lastfm_asia import LastFMAsia -from .deezer_europe import DeezerEurope -from .gemsec import GemsecDeezer -from .twitch 
import Twitch -from .airports import Airports -from .ba_shapes import BAShapes -from .lrgb import LRGBDataset -from .malnet_tiny import MalNetTiny -from .omdb import OMDB -from .polblogs import PolBlogs -from .email_eu_core import EmailEUCore -from .sbm_dataset import StochasticBlockModelDataset -from .sbm_dataset import RandomPartitionGraphDataset -from .linkx_dataset import LINKXDataset -from .elliptic import EllipticBitcoinDataset -from .dgraph import DGraphFin -from .hydro_net import HydroNet -from .explainer_dataset import ExplainerDataset -from .infection_dataset import InfectionDataset -from .ba2motif_dataset import BA2MotifDataset -from .ba_multi_shapes import BAMultiShapesDataset -from .airfrans import AirfRANS -from .taobao import Taobao - -import torch_geometric.datasets.utils # noqa - -__all__ = [ - 'KarateClub', - 'TUDataset', - 'GNNBenchmarkDataset', - 'Planetoid', - 'FakeDataset', - 'FakeHeteroDataset', - 'NELL', - 'CitationFull', - 'CoraFull', - 'Coauthor', - 'Amazon', - 'PPI', - 'Reddit', - 'Reddit2', - 'Flickr', - 'Yelp', - 'AmazonProducts', - 'QM7b', - 'QM9', - 'MD17', - 'ZINC', - 'AQSOL', - 'MoleculeNet', - 'Entities', - 'RelLinkPredDataset', - 'GEDDataset', - 'AttributedGraphDataset', - 'MNISTSuperpixels', - 'FAUST', - 'DynamicFAUST', - 'ShapeNet', - 'ModelNet', - 'CoMA', - 'SHREC2016', - 'TOSCA', - 'PCPNetDataset', - 'S3DIS', - 'GeometricShapes', - 'BitcoinOTC', - 'ICEWS18', - 'GDELT', - 'DBP15K', - 'WILLOWObjectClass', - 'PascalVOCKeypoints', - 'PascalPF', - 'SNAPDataset', - 'SuiteSparseMatrixCollection', - 'AMiner', - 'WordNet18', - 'WordNet18RR', - 'FB15k_237', - 'WikiCS', - 'WebKB', - 'WikipediaNetwork', - 'HeterophilousGraphDataset', - 'Actor', - 'OGB_MAG', - 'DBLP', - 'MovieLens', - 'IMDB', - 'LastFM', - 'HGBDataset', - 'JODIEDataset', - 'MixHopSyntheticDataset', - 'UPFD', - 'GitHub', - 'FacebookPagePage', - 'LastFMAsia', - 'DeezerEurope', - 'GemsecDeezer', - 'Twitch', - 'Airports', - 'BAShapes', - 'LRGBDataset', - 'MalNetTiny', - 'OMDB', - 'PolBlogs', - 'EmailEUCore', - 'StochasticBlockModelDataset', - 'RandomPartitionGraphDataset', - 'LINKXDataset', - 'EllipticBitcoinDataset', - 'DGraphFin', - 'HydroNet', - 'ExplainerDataset', - 'InfectionDataset', - 'BA2MotifDataset', - 'BAMultiShapesDataset', - 'AirfRANS', - 'Taobao', -] - -classes = __all__ diff --git a/pytorch_geometric-2.3.1/torch_geometric/datasets/planetoid.py b/pytorch_geometric-2.3.1/torch_geometric/datasets/planetoid.py deleted file mode 100644 index 9c16a37..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/datasets/planetoid.py +++ /dev/null @@ -1,168 +0,0 @@ -import os.path as osp -from typing import Callable, List, Optional - -import numpy as np -import torch - -from torch_geometric.data import InMemoryDataset, download_url -from torch_geometric.io import read_planetoid_data - - -class Planetoid(InMemoryDataset): - r"""The citation network datasets :obj:`"Cora"`, :obj:`"CiteSeer"` and - :obj:`"PubMed"` from the `"Revisiting Semi-Supervised Learning with Graph - Embeddings" `_ paper. - Nodes represent documents and edges represent citation links. - Training, validation and test splits are given by binary masks. - - Args: - root (str): Root directory where the dataset should be saved. - name (str): The name of the dataset (:obj:`"Cora"`, :obj:`"CiteSeer"`, - :obj:`"PubMed"`). - split (str, optional): The type of dataset split (:obj:`"public"`, - :obj:`"full"`, :obj:`"geom-gcn"`, :obj:`"random"`). 
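As a quick usage sketch of the loader documented here (the semantics of each `split` value continue below):

```python
from torch_geometric.datasets import Planetoid

dataset = Planetoid(root='/tmp/Cora', name='Cora', split='public')
data = dataset[0]                    # the single citation graph

print(data.train_mask.sum().item())  # 140 labeled training nodes (20 per class)
```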
- If set to :obj:`"public"`, the split will be the public fixed split - from the `"Revisiting Semi-Supervised Learning with Graph - Embeddings" `_ paper. - If set to :obj:`"full"`, all nodes except those in the validation - and test sets will be used for training (as in the - `"FastGCN: Fast Learning with Graph Convolutional Networks via - Importance Sampling" `_ paper). - If set to :obj:`"geom-gcn"`, the 10 public fixed splits from the - `"Geom-GCN: Geometric Graph Convolutional Networks" - `_ paper are given. - If set to :obj:`"random"`, train, validation, and test sets will be - randomly generated, according to :obj:`num_train_per_class`, - :obj:`num_val` and :obj:`num_test`. (default: :obj:`"public"`) - num_train_per_class (int, optional): The number of training samples - per class in case of :obj:`"random"` split. (default: :obj:`20`) - num_val (int, optional): The number of validation samples in case of - :obj:`"random"` split. (default: :obj:`500`) - num_test (int, optional): The number of test samples in case of - :obj:`"random"` split. (default: :obj:`1000`) - transform (callable, optional): A function/transform that takes in an - :obj:`torch_geometric.data.Data` object and returns a transformed - version. The data object will be transformed before every access. - (default: :obj:`None`) - pre_transform (callable, optional): A function/transform that takes in - an :obj:`torch_geometric.data.Data` object and returns a - transformed version. The data object will be transformed before - being saved to disk. (default: :obj:`None`) - - **STATS:** - - .. list-table:: - :widths: 10 10 10 10 10 - :header-rows: 1 - - * - Name - - #nodes - - #edges - - #features - - #classes - * - Cora - - 2,708 - - 10,556 - - 1,433 - - 7 - * - CiteSeer - - 3,327 - - 9,104 - - 3,703 - - 6 - * - PubMed - - 19,717 - - 88,648 - - 500 - - 3 - """ - url = 'https://github.com/kimiyoung/planetoid/raw/master/data' - geom_gcn_url = ('https://raw.githubusercontent.com/graphdml-uiuc-jlu/' - 'geom-gcn/master') - - def __init__(self, root: str, name: str, split: str = "public", - num_train_per_class: int = 20, num_val: int = 500, - num_test: int = 1000, transform: Optional[Callable] = None, - pre_transform: Optional[Callable] = None): - self.name = name - - self.split = split.lower() - assert self.split in ['public', 'full', 'geom-gcn', 'random'] - - super().__init__(root, transform, pre_transform) - self.data, self.slices = torch.load(self.processed_paths[0]) - - if split == 'full': - data = self.get(0) - data.train_mask.fill_(True) - data.train_mask[data.val_mask | data.test_mask] = False - self.data, self.slices = self.collate([data]) - - elif split == 'random': - data = self.get(0) - data.train_mask.fill_(False) - for c in range(self.num_classes): - idx = (data.y == c).nonzero(as_tuple=False).view(-1) - idx = idx[torch.randperm(idx.size(0))[:num_train_per_class]] - data.train_mask[idx] = True - - remaining = (~data.train_mask).nonzero(as_tuple=False).view(-1) - remaining = remaining[torch.randperm(remaining.size(0))] - - data.val_mask.fill_(False) - data.val_mask[remaining[:num_val]] = True - - data.test_mask.fill_(False) - data.test_mask[remaining[num_val:num_val + num_test]] = True - - self.data, self.slices = self.collate([data]) - - @property - def raw_dir(self) -> str: - if self.split == 'geom-gcn': - return osp.join(self.root, self.name, 'geom-gcn', 'raw') - return osp.join(self.root, self.name, 'raw') - - @property - def processed_dir(self) -> str: - if self.split == 'geom-gcn': - return osp.join(self.root, 
self.name, 'geom-gcn', 'processed') - return osp.join(self.root, self.name, 'processed') - - @property - def raw_file_names(self) -> List[str]: - names = ['x', 'tx', 'allx', 'y', 'ty', 'ally', 'graph', 'test.index'] - return [f'ind.{self.name.lower()}.{name}' for name in names] - - @property - def processed_file_names(self) -> str: - return 'data.pt' - - def download(self): - for name in self.raw_file_names: - download_url(f'{self.url}/{name}', self.raw_dir) - if self.split == 'geom-gcn': - for i in range(10): - url = f'{self.geom_gcn_url}/splits/{self.name.lower()}' - download_url(f'{url}_split_0.6_0.2_{i}.npz', self.raw_dir) - - def process(self): - data = read_planetoid_data(self.raw_dir, self.name) - - if self.split == 'geom-gcn': - train_masks, val_masks, test_masks = [], [], [] - for i in range(10): - name = f'{self.name.lower()}_split_0.6_0.2_{i}.npz' - splits = np.load(osp.join(self.raw_dir, name)) - train_masks.append(torch.from_numpy(splits['train_mask'])) - val_masks.append(torch.from_numpy(splits['val_mask'])) - test_masks.append(torch.from_numpy(splits['test_mask'])) - data.train_mask = torch.stack(train_masks, dim=1) - data.val_mask = torch.stack(val_masks, dim=1) - data.test_mask = torch.stack(test_masks, dim=1) - - data = data if self.pre_transform is None else self.pre_transform(data) - torch.save(self.collate([data]), self.processed_paths[0]) - - def __repr__(self) -> str: - return f'{self.name}()' diff --git a/pytorch_geometric-2.3.1/torch_geometric/datasets/utils/__init__.py b/pytorch_geometric-2.3.1/torch_geometric/datasets/utils/__init__.py deleted file mode 100644 index 0236f00..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/datasets/utils/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -from .cheatsheet import paper_link, has_stats, get_stat, get_children - -__all__ = [ - 'paper_link', - 'has_stats', - 'get_stat', - 'get_children', -] diff --git a/pytorch_geometric-2.3.1/torch_geometric/datasets/utils/cheatsheet.py b/pytorch_geometric-2.3.1/torch_geometric/datasets/utils/cheatsheet.py deleted file mode 100644 index 148983e..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/datasets/utils/cheatsheet.py +++ /dev/null @@ -1,52 +0,0 @@ -import importlib -import inspect -import re -from typing import Any, List, Optional - - -def paper_link(cls: str) -> str: - cls = importlib.import_module('torch_geometric.datasets').__dict__[cls] - match = re.search('<.+?>', inspect.getdoc(cls), flags=re.DOTALL) - return None if match is None else match.group().replace('\n', ' ')[1:-1] - - -def get_stats_table(cls: str) -> str: - cls = importlib.import_module('torch_geometric.datasets').__dict__[cls] - match = re.search(r'\*\*STATS:\*\*\n.*$', inspect.getdoc(cls), - flags=re.DOTALL) - return '' if match is None else match.group() - - -def has_stats(cls: str) -> bool: - return len(get_stats_table(cls)) > 0 - - -def get_stat(cls: str, name: str, child: Optional[str] = None, - default: Any = None) -> str: - if child is None and len(get_children(cls)) > 0: - return '' - - stats_table = get_stats_table(cls) - - if len(stats_table) > 0: - stats_table = '\n'.join(stats_table.split('\n')[2:]) - - match = re.search(f'^.*- {name}', stats_table, flags=re.DOTALL) - if match is None: - return default - - column = match.group().count(' -') - - if child is not None: - child = child.replace('(', r'\(').replace(')', r'\)') - match = re.search(f'[*] - {child}\n.*$', stats_table, flags=re.DOTALL) - stats_row = match.group() - else: - stats_row = '*' + stats_table.split('*')[2] - - return 
stats_row.split(' -')[column].split('\n')[0].strip() - - -def get_children(cls: str) -> List[str]: - matches = re.findall('[*] -.*', get_stats_table(cls)) - return [match[4:] for match in matches[1:]] if len(matches) > 2 else [] diff --git a/pytorch_geometric-2.3.1/torch_geometric/experimental.py b/pytorch_geometric-2.3.1/torch_geometric/experimental.py deleted file mode 100644 index c4134c5..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/experimental.py +++ /dev/null @@ -1,79 +0,0 @@ -from typing import List, Optional, Union - -__experimental_flag__ = {} - -Options = Optional[Union[str, List[str]]] - - -def get_options(options: Options) -> List[str]: - if options is None: - options = list(__experimental_flag__.keys()) - if isinstance(options, str): - options = [options] - return options - - -def is_experimental_mode_enabled(options: Options = None) -> bool: - r"""Returns :obj:`True` if the experimental mode is enabled. See - :class:`torch_geometric.experimental_mode` for a list of (optional) - options.""" - options = get_options(options) - return all([__experimental_flag__[option] for option in options]) - - -def set_experimental_mode_enabled(mode: bool, options: Options = None): - for option in get_options(options): - __experimental_flag__[option] = mode - - -class experimental_mode: - r"""Context-manager that enables the experimental mode to test new but - potentially unstable features. - - .. code-block:: python - - with torch_geometric.experimental_mode(): - out = model(data.x, data.edge_index) - - Args: - options (str or list, optional): Currently there are no experimental - features. - """ - def __init__(self, options: Options = None): - self.options = get_options(options) - self.previous_state = { - option: __experimental_flag__[option] - for option in self.options - } - - def __enter__(self): - set_experimental_mode_enabled(True, self.options) - - def __exit__(self, *args) -> bool: - for option, value in self.previous_state.items(): - __experimental_flag__[option] = value - - -class set_experimental_mode: - r"""Context-manager that sets the experimental mode on or off. - - :class:`set_experimental_mode` will enable or disable the experimental mode - based on its argument :attr:`mode`. - It can be used as a context-manager or as a function. - - See :class:`experimental_mode` above for more details. 
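Both entry points can be exercised directly; a minimal sketch (since no experimental options are currently registered, both calls are effectively no-ops):

```python
from torch_geometric.experimental import experimental_mode, set_experimental_mode

# Context-manager form: previous flag values are restored on exit.
with experimental_mode():
    pass  # run code with all experimental options enabled

# Imperative form: instantiating the class toggles the mode immediately.
set_experimental_mode(True)
```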
- """ - def __init__(self, mode: bool, options: Options = None): - self.options = get_options(options) - self.previous_state = { - option: __experimental_flag__[option] - for option in self.options - } - set_experimental_mode_enabled(mode, self.options) - - def __enter__(self): - pass - - def __exit__(self, *args): - for option, value in self.previous_state.items(): - __experimental_flag__[option] = value diff --git a/pytorch_geometric-2.3.1/torch_geometric/explain/algorithm/__init__.py b/pytorch_geometric-2.3.1/torch_geometric/explain/algorithm/__init__.py deleted file mode 100644 index e9b04ad..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/explain/algorithm/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -from .base import ExplainerAlgorithm -from .dummy_explainer import DummyExplainer -from .gnn_explainer import GNNExplainer -from .captum_explainer import CaptumExplainer -from .pg_explainer import PGExplainer -from .attention_explainer import AttentionExplainer - -__all__ = classes = [ - 'ExplainerAlgorithm', - 'DummyExplainer', - 'GNNExplainer', - 'CaptumExplainer', - 'PGExplainer', - 'AttentionExplainer', -] diff --git a/pytorch_geometric-2.3.1/torch_geometric/explain/algorithm/utils.py b/pytorch_geometric-2.3.1/torch_geometric/explain/algorithm/utils.py deleted file mode 100644 index 0d181f3..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/explain/algorithm/utils.py +++ /dev/null @@ -1,67 +0,0 @@ -from typing import Dict, Union - -import torch -from torch import Tensor -from torch.nn import Parameter - -from torch_geometric.nn import MessagePassing -from torch_geometric.typing import EdgeType - - -def set_masks( - model: torch.nn.Module, - mask: Union[Tensor, Parameter], - edge_index: Tensor, - apply_sigmoid: bool = True, -): - r"""Apply mask to every graph layer in the :obj:`model`.""" - loop_mask = edge_index[0] != edge_index[1] - - # Loop over layers and set masks on MessagePassing layers: - for module in model.modules(): - if isinstance(module, MessagePassing): - - # Convert mask to a param if it was previously registered as one. - # This is a workaround for the fact that PyTorch does not allow - # assignments of pure tensors to parameter attributes: - if (not isinstance(mask, Parameter) - and '_edge_mask' in module._parameters): - mask = Parameter(mask) - - module.explain = True - module._edge_mask = mask - module._loop_mask = loop_mask - module._apply_sigmoid = apply_sigmoid - - -def set_hetero_masks( - model: torch.nn.Module, - mask_dict: Dict[EdgeType, Union[Tensor, Parameter]], - edge_index_dict: Dict[EdgeType, Tensor], - apply_sigmoid: bool = True, -): - r"""Apply masks to every heterogeneous graph layer in the :obj:`model` - according to edge types.""" - for module in model.modules(): - if isinstance(module, torch.nn.ModuleDict): - for edge_type in mask_dict.keys(): - # TODO (jinu) Use common function get `str_edge_type`. 
- str_edge_type = '__'.join(edge_type) - if str_edge_type in module: - set_masks( - module[str_edge_type], - mask_dict[edge_type], - edge_index_dict[edge_type], - apply_sigmoid=apply_sigmoid, - ) - - -def clear_masks(model: torch.nn.Module): - r"""Clear all masks from the model.""" - for module in model.modules(): - if isinstance(module, MessagePassing): - module.explain = False - module._edge_mask = None - module._loop_mask = None - module._apply_sigmoid = True - return module diff --git a/pytorch_geometric-2.3.1/torch_geometric/graphgym/__init__.py b/pytorch_geometric-2.3.1/torch_geometric/graphgym/__init__.py deleted file mode 100644 index 8354971..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/graphgym/__init__.py +++ /dev/null @@ -1,67 +0,0 @@ -from .contrib import * # noqa -from .models import * # noqa -from .utils import * # noqa -from .checkpoint import load_ckpt, save_ckpt, remove_ckpt, clean_ckpt -from .cmd_args import parse_args -from .config import (cfg, set_cfg, load_cfg, dump_cfg, set_run_dir, - set_out_dir, get_fname) -from .init import init_weights -from .loader import create_loader -from .logger import set_printing, create_logger -from .loss import compute_loss -from .model_builder import create_model -from .optim import create_optimizer, create_scheduler -from .train import train -from .register import (register_base, register_act, register_node_encoder, - register_edge_encoder, register_stage, register_head, - register_layer, register_pooling, register_network, - register_config, register_dataset, register_loader, - register_optimizer, register_scheduler, register_loss, - register_train, register_metric) -from .config_store import (to_dataclass, dataclass_from_class, - class_from_dataclass, get_config_store) - -__all__ = classes = [ - 'load_ckpt', - 'save_ckpt', - 'remove_ckpt', - 'clean_ckpt', - 'parse_args', - 'cfg', - 'set_cfg', - 'load_cfg', - 'dump_cfg', - 'set_run_dir', - 'set_out_dir', - 'get_fname', - 'init_weights', - 'create_loader', - 'set_printing', - 'create_logger', - 'compute_loss', - 'create_model', - 'create_optimizer', - 'create_scheduler', - 'train', - 'register_base', - 'register_act', - 'register_node_encoder', - 'register_edge_encoder', - 'register_stage', - 'register_head', - 'register_layer', - 'register_pooling', - 'register_network', - 'register_config', - 'register_dataset', - 'register_loader', - 'register_optimizer', - 'register_scheduler', - 'register_loss', - 'register_train', - 'register_metric', - 'to_dataclass', - 'dataclass_from_class', - 'class_from_dataclass', - 'get_config_store', -] diff --git a/pytorch_geometric-2.3.1/torch_geometric/graphgym/init.py b/pytorch_geometric-2.3.1/torch_geometric/graphgym/init.py deleted file mode 100644 index 5782179..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/graphgym/init.py +++ /dev/null @@ -1,19 +0,0 @@ -import torch.nn as nn - - -def init_weights(m): - r""" - Performs weight initialization - - Args: - m (nn.Module): PyTorch module - - """ - if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d): - m.weight.data.fill_(1.0) - m.bias.data.zero_() - elif isinstance(m, nn.Linear): - m.weight.data = nn.init.xavier_uniform_( - m.weight.data, gain=nn.init.calculate_gain('relu')) - if m.bias is not None: - m.bias.data.zero_() diff --git a/pytorch_geometric-2.3.1/torch_geometric/graphgym/models/act.py b/pytorch_geometric-2.3.1/torch_geometric/graphgym/models/act.py deleted file mode 100644 index 4f9bc4d..0000000 --- 
a/pytorch_geometric-2.3.1/torch_geometric/graphgym/models/act.py +++ /dev/null @@ -1,42 +0,0 @@ -import torch.nn as nn - -from torch_geometric.graphgym.config import cfg -from torch_geometric.graphgym.register import register_act - - -def relu(): - return nn.ReLU(inplace=cfg.mem.inplace) - - -def selu(): - return nn.SELU(inplace=cfg.mem.inplace) - - -def prelu(): - return nn.PReLU() - - -def elu(): - return nn.ELU(inplace=cfg.mem.inplace) - - -def lrelu_01(): - return nn.LeakyReLU(0.1, inplace=cfg.mem.inplace) - - -def lrelu_025(): - return nn.LeakyReLU(0.25, inplace=cfg.mem.inplace) - - -def lrelu_05(): - return nn.LeakyReLU(0.5, inplace=cfg.mem.inplace) - - -if cfg is not None: - register_act('relu', relu) - register_act('selu', selu) - register_act('prelu', prelu) - register_act('elu', elu) - register_act('lrelu_01', lrelu_01) - register_act('lrelu_025', lrelu_025) - register_act('lrelu_05', lrelu_05) diff --git a/pytorch_geometric-2.3.1/torch_geometric/graphgym/models/encoder.py b/pytorch_geometric-2.3.1/torch_geometric/graphgym/models/encoder.py deleted file mode 100644 index 8cb606e..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/graphgym/models/encoder.py +++ /dev/null @@ -1,89 +0,0 @@ -import torch - -from torch_geometric.graphgym.register import ( - register_edge_encoder, - register_node_encoder, -) - - -@register_node_encoder('Integer') -class IntegerFeatureEncoder(torch.nn.Module): - """ - Provides an encoder for integer node features. - - Args: - emb_dim (int): Output embedding dimension - num_classes (int): the number of classes for the - embedding mapping to learn from - """ - def __init__(self, emb_dim, num_classes=None): - super().__init__() - - self.encoder = torch.nn.Embedding(num_classes, emb_dim) - torch.nn.init.xavier_uniform_(self.encoder.weight.data) - - def forward(self, batch): - # Encode just the first dimension if more exist - batch.x = self.encoder(batch.x[:, 0]) - - return batch - - -@register_node_encoder('Atom') -class AtomEncoder(torch.nn.Module): - """ - The atom Encoder used in OGB molecule dataset. - - Args: - emb_dim (int): Output embedding dimension - num_classes: None - """ - def __init__(self, emb_dim, num_classes=None): - super().__init__() - - from ogb.utils.features import get_atom_feature_dims - - self.atom_embedding_list = torch.nn.ModuleList() - - for i, dim in enumerate(get_atom_feature_dims()): - emb = torch.nn.Embedding(dim, emb_dim) - torch.nn.init.xavier_uniform_(emb.weight.data) - self.atom_embedding_list.append(emb) - - def forward(self, batch): - encoded_features = 0 - for i in range(batch.x.shape[1]): - encoded_features += self.atom_embedding_list[i](batch.x[:, i]) - - batch.x = encoded_features - return batch - - -@register_edge_encoder('Bond') -class BondEncoder(torch.nn.Module): - """ - The bond Encoder used in OGB molecule dataset. 
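A detail of `IntegerFeatureEncoder` above that is easy to miss: only the first feature column is embedded. A small illustrative example (the shapes are invented):

```
import torch
from torch_geometric.data import Data
from torch_geometric.graphgym.models.encoder import IntegerFeatureEncoder

enc = IntegerFeatureEncoder(emb_dim=16, num_classes=10)
batch = Data(x=torch.randint(0, 10, (5, 2)))  # integer-coded node features
batch = enc(batch)    # only column 0 is looked up in the embedding table
print(batch.x.shape)  # torch.Size([5, 16])
```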
- - Args: - emb_dim (int): Output edge embedding dimension - """ - def __init__(self, emb_dim): - super().__init__() - - from ogb.utils.features import get_bond_feature_dims - - self.bond_embedding_list = torch.nn.ModuleList() - - for i, dim in enumerate(get_bond_feature_dims()): - emb = torch.nn.Embedding(dim, emb_dim) - torch.nn.init.xavier_uniform_(emb.weight.data) - self.bond_embedding_list.append(emb) - - def forward(self, batch): - bond_embedding = 0 - for i in range(batch.edge_attr.shape[1]): - edge_attr = batch.edge_attr - bond_embedding += self.bond_embedding_list[i](edge_attr[:, i]) - - batch.edge_attr = bond_embedding - return batch diff --git a/pytorch_geometric-2.3.1/torch_geometric/graphgym/models/gnn.py b/pytorch_geometric-2.3.1/torch_geometric/graphgym/models/gnn.py deleted file mode 100644 index af521c4..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/graphgym/models/gnn.py +++ /dev/null @@ -1,154 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - -import torch_geometric.graphgym.register as register -from torch_geometric.graphgym.config import cfg -from torch_geometric.graphgym.init import init_weights -from torch_geometric.graphgym.models.layer import ( - BatchNorm1dNode, - GeneralLayer, - GeneralMultiLayer, - new_layer_config, -) -from torch_geometric.graphgym.register import register_stage - - -def GNNLayer(dim_in, dim_out, has_act=True): - """ - Wrapper for a GNN layer - - Args: - dim_in (int): Input dimension - dim_out (int): Output dimension - has_act (bool): Whether has activation function after the layer - - """ - return GeneralLayer( - cfg.gnn.layer_type, - layer_config=new_layer_config(dim_in, dim_out, 1, has_act=has_act, - has_bias=False, cfg=cfg)) - - -def GNNPreMP(dim_in, dim_out, num_layers): - """ - Wrapper for NN layer before GNN message passing - - Args: - dim_in (int): Input dimension - dim_out (int): Output dimension - num_layers (int): Number of layers - - """ - return GeneralMultiLayer( - 'linear', - layer_config=new_layer_config(dim_in, dim_out, num_layers, - has_act=False, has_bias=False, cfg=cfg)) - - -@register_stage('stack') -@register_stage('skipsum') -@register_stage('skipconcat') -class GNNStackStage(nn.Module): - """ - Simple Stage that stack GNN layers - - Args: - dim_in (int): Input dimension - dim_out (int): Output dimension - num_layers (int): Number of GNN layers - """ - def __init__(self, dim_in, dim_out, num_layers): - super().__init__() - self.num_layers = num_layers - for i in range(num_layers): - if cfg.gnn.stage_type == 'skipconcat': - d_in = dim_in if i == 0 else dim_in + i * dim_out - else: - d_in = dim_in if i == 0 else dim_out - layer = GNNLayer(d_in, dim_out) - self.add_module('layer{}'.format(i), layer) - - def forward(self, batch): - for i, layer in enumerate(self.children()): - x = batch.x - batch = layer(batch) - if cfg.gnn.stage_type == 'skipsum': - batch.x = x + batch.x - elif cfg.gnn.stage_type == 'skipconcat' and \ - i < self.num_layers - 1: - batch.x = torch.cat([x, batch.x], dim=1) - if cfg.gnn.l2norm: - batch.x = F.normalize(batch.x, p=2, dim=-1) - return batch - - -class FeatureEncoder(nn.Module): - """ - Encoding node and edge features - - Args: - dim_in (int): Input feature dimension - """ - def __init__(self, dim_in): - super().__init__() - self.dim_in = dim_in - if cfg.dataset.node_encoder: - # Encode integer node features via nn.Embeddings - NodeEncoder = register.node_encoder_dict[ - cfg.dataset.node_encoder_name] - self.node_encoder = NodeEncoder(cfg.gnn.dim_inner) - if 
cfg.dataset.node_encoder_bn: - self.node_encoder_bn = BatchNorm1dNode( - new_layer_config(cfg.gnn.dim_inner, -1, -1, has_act=False, - has_bias=False, cfg=cfg)) - # Update dim_in to reflect the new dimension fo the node features - self.dim_in = cfg.gnn.dim_inner - if cfg.dataset.edge_encoder: - # Encode integer edge features via nn.Embeddings - EdgeEncoder = register.edge_encoder_dict[ - cfg.dataset.edge_encoder_name] - self.edge_encoder = EdgeEncoder(cfg.gnn.dim_inner) - if cfg.dataset.edge_encoder_bn: - self.edge_encoder_bn = BatchNorm1dNode( - new_layer_config(cfg.gnn.dim_inner, -1, -1, has_act=False, - has_bias=False, cfg=cfg)) - - def forward(self, batch): - for module in self.children(): - batch = module(batch) - return batch - - -class GNN(nn.Module): - """ - General GNN model: encoder + stage + head - - Args: - dim_in (int): Input dimension - dim_out (int): Output dimension - **kwargs (optional): Optional additional args - """ - def __init__(self, dim_in, dim_out, **kwargs): - super().__init__() - GNNStage = register.stage_dict[cfg.gnn.stage_type] - GNNHead = register.head_dict[cfg.gnn.head] - - self.encoder = FeatureEncoder(dim_in) - dim_in = self.encoder.dim_in - - if cfg.gnn.layers_pre_mp > 0: - self.pre_mp = GNNPreMP(dim_in, cfg.gnn.dim_inner, - cfg.gnn.layers_pre_mp) - dim_in = cfg.gnn.dim_inner - if cfg.gnn.layers_mp > 0: - self.mp = GNNStage(dim_in=dim_in, dim_out=cfg.gnn.dim_inner, - num_layers=cfg.gnn.layers_mp) - self.post_mp = GNNHead(dim_in=cfg.gnn.dim_inner, dim_out=dim_out) - - self.apply(init_weights) - - def forward(self, batch): - for module in self.children(): - batch = module(batch) - return batch diff --git a/pytorch_geometric-2.3.1/torch_geometric/graphgym/models/head.py b/pytorch_geometric-2.3.1/torch_geometric/graphgym/models/head.py deleted file mode 100644 index e841972..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/graphgym/models/head.py +++ /dev/null @@ -1,118 +0,0 @@ -""" GNN heads are the last layer of a GNN right before loss computation. -They are constructed in the init function of the gnn.GNN. -""" - -import torch -import torch.nn as nn - -import torch_geometric.graphgym.register as register -from torch_geometric.graphgym.config import cfg -from torch_geometric.graphgym.models.layer import MLP, new_layer_config -from torch_geometric.graphgym.register import register_head - - -@register_head('node') -class GNNNodeHead(nn.Module): - """ - GNN prediction head for node prediction tasks. - - Args: - dim_in (int): Input dimension - dim_out (int): Output dimension. For binary prediction, dim_out=1. - """ - def __init__(self, dim_in, dim_out): - super().__init__() - self.layer_post_mp = MLP( - new_layer_config(dim_in, dim_out, cfg.gnn.layers_post_mp, - has_act=False, has_bias=True, cfg=cfg)) - - def _apply_index(self, batch): - mask = '{}_mask'.format(batch.split) - return batch.x[batch[mask]], \ - batch.y[batch[mask]] - - def forward(self, batch): - batch = self.layer_post_mp(batch) - pred, label = self._apply_index(batch) - return pred, label - - -@register_head('edge') -@register_head('link_pred') -class GNNEdgeHead(nn.Module): - """ - GNN prediction head for edge/link prediction tasks. - - Args: - dim_in (int): Input dimension - dim_out (int): Output dimension. For binary prediction, dim_out=1. 
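The `GNNLayer` and `GNNPreMP` wrappers shown above read most of their configuration from the global `cfg` object rather than from arguments. A hedged construction sketch, assuming GraphGym's default config values after `set_cfg`:

```
from torch_geometric.graphgym.config import cfg, set_cfg
from torch_geometric.graphgym.models.gnn import GNNLayer, GNNPreMP

set_cfg(cfg)                    # populate `cfg` with GraphGym defaults
cfg.gnn.layer_type = 'gcnconv'  # any name registered in `layer_dict`

pre_mp = GNNPreMP(dim_in=32, dim_out=64, num_layers=2)  # plain linear stack
layer = GNNLayer(dim_in=64, dim_out=64)                 # one GCN block
```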
- """ - def __init__(self, dim_in, dim_out): - super().__init__() - # module to decode edges from node embeddings - if cfg.model.edge_decoding == 'concat': - self.layer_post_mp = MLP( - new_layer_config(dim_in * 2, dim_out, cfg.gnn.layers_post_mp, - has_act=False, has_bias=True, cfg=cfg)) - # requires parameter - self.decode_module = lambda v1, v2: \ - self.layer_post_mp(torch.cat((v1, v2), dim=-1)) - else: - if dim_out > 1: - raise ValueError( - 'Binary edge decoding ({})is used for multi-class ' - 'edge/link prediction.'.format(cfg.model.edge_decoding)) - self.layer_post_mp = MLP( - new_layer_config(dim_in, dim_in, cfg.gnn.layers_post_mp, - has_act=False, has_bias=True, cfg=cfg)) - if cfg.model.edge_decoding == 'dot': - self.decode_module = lambda v1, v2: torch.sum(v1 * v2, dim=-1) - elif cfg.model.edge_decoding == 'cosine_similarity': - self.decode_module = nn.CosineSimilarity(dim=-1) - else: - raise ValueError('Unknown edge decoding {}.'.format( - cfg.model.edge_decoding)) - - def _apply_index(self, batch): - index = '{}_edge_index'.format(batch.split) - label = '{}_edge_label'.format(batch.split) - return batch.x[batch[index]], \ - batch[label] - - def forward(self, batch): - if cfg.model.edge_decoding != 'concat': - batch = self.layer_post_mp(batch) - pred, label = self._apply_index(batch) - nodes_first = pred[0] - nodes_second = pred[1] - pred = self.decode_module(nodes_first, nodes_second) - return pred, label - - -@register_head('graph') -class GNNGraphHead(nn.Module): - """ - GNN prediction head for graph prediction tasks. - The optional post_mp layer (specified by cfg.gnn.post_mp) is used - to transform the pooled embedding using an MLP. - - Args: - dim_in (int): Input dimension - dim_out (int): Output dimension. For binary prediction, dim_out=1. - """ - def __init__(self, dim_in, dim_out): - super().__init__() - self.layer_post_mp = MLP( - new_layer_config(dim_in, dim_out, cfg.gnn.layers_post_mp, - has_act=False, has_bias=True, cfg=cfg)) - self.pooling_fun = register.pooling_dict[cfg.model.graph_pooling] - - def _apply_index(self, batch): - return batch.graph_feature, batch.y - - def forward(self, batch): - graph_emb = self.pooling_fun(batch.x, batch.batch) - graph_emb = self.layer_post_mp(graph_emb) - batch.graph_feature = graph_emb - pred, label = self._apply_index(batch) - return pred, label diff --git a/pytorch_geometric-2.3.1/torch_geometric/graphgym/models/layer.py b/pytorch_geometric-2.3.1/torch_geometric/graphgym/models/layer.py deleted file mode 100644 index aa5e047..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/graphgym/models/layer.py +++ /dev/null @@ -1,379 +0,0 @@ -import copy -from dataclasses import dataclass, replace - -import torch -import torch.nn as nn -import torch.nn.functional as F - -import torch_geometric as pyg -import torch_geometric.graphgym.models.act -import torch_geometric.graphgym.register as register -from torch_geometric.graphgym.contrib.layer.generalconv import ( - GeneralConvLayer, - GeneralEdgeConvLayer, -) -from torch_geometric.graphgym.register import register_layer -from torch_geometric.nn import Linear as Linear_pyg - - -@dataclass -class LayerConfig: - # batchnorm parameters. - has_batchnorm: bool = False - bn_eps: float = 1e-5 - bn_mom: float = 0.1 - - # mem parameters. - mem_inplace: bool = False - - # gnn parameters. - dim_in: int = -1 - dim_out: int = -1 - edge_dim: int = -1 - dim_inner: int = None - num_layers: int = 2 - has_bias: bool = True - # regularizer parameters. 
- has_l2norm: bool = True - dropout: float = 0.0 - # activation parameters. - has_act: bool = True - final_act: bool = True - act: str = 'relu' - - # other parameters. - keep_edge: float = 0.5 - - -def new_layer_config(dim_in, dim_out, num_layers, has_act, has_bias, cfg): - return LayerConfig( - has_batchnorm=cfg.gnn.batchnorm, - bn_eps=cfg.bn.eps, - bn_mom=cfg.bn.mom, - mem_inplace=cfg.mem.inplace, - dim_in=dim_in, - dim_out=dim_out, - edge_dim=cfg.dataset.edge_dim, - has_l2norm=cfg.gnn.l2norm, - dropout=cfg.gnn.dropout, - has_act=has_act, - final_act=True, - act=cfg.gnn.act, - has_bias=has_bias, - keep_edge=cfg.gnn.keep_edge, - dim_inner=cfg.gnn.dim_inner, - num_layers=num_layers, - ) - - -# General classes -class GeneralLayer(nn.Module): - """ - General wrapper for layers - - Args: - name (str): Name of the layer in registered :obj:`layer_dict` - dim_in (int): Input dimension - dim_out (int): Output dimension - has_act (bool): Whether has activation after the layer - has_bn (bool): Whether has BatchNorm in the layer - has_l2norm (bool): Wheter has L2 normalization after the layer - **kwargs (optional): Additional args - """ - def __init__(self, name, layer_config: LayerConfig, **kwargs): - super().__init__() - self.has_l2norm = layer_config.has_l2norm - has_bn = layer_config.has_batchnorm - layer_config.has_bias = not has_bn - self.layer = register.layer_dict[name](layer_config, **kwargs) - layer_wrapper = [] - if has_bn: - layer_wrapper.append( - nn.BatchNorm1d(layer_config.dim_out, eps=layer_config.bn_eps, - momentum=layer_config.bn_mom)) - if layer_config.dropout > 0: - layer_wrapper.append( - nn.Dropout(p=layer_config.dropout, - inplace=layer_config.mem_inplace)) - if layer_config.has_act: - layer_wrapper.append(register.act_dict[layer_config.act]()) - self.post_layer = nn.Sequential(*layer_wrapper) - - def forward(self, batch): - batch = self.layer(batch) - if isinstance(batch, torch.Tensor): - batch = self.post_layer(batch) - if self.has_l2norm: - batch = F.normalize(batch, p=2, dim=1) - else: - batch.x = self.post_layer(batch.x) - if self.has_l2norm: - batch.x = F.normalize(batch.x, p=2, dim=1) - return batch - - -class GeneralMultiLayer(nn.Module): - """ - General wrapper for a stack of multiple layers - - Args: - name (str): Name of the layer in registered :obj:`layer_dict` - num_layers (int): Number of layers in the stack - dim_in (int): Input dimension - dim_out (int): Output dimension - dim_inner (int): The dimension for the inner layers - final_act (bool): Whether has activation after the layer stack - **kwargs (optional): Additional args - """ - def __init__(self, name, layer_config: LayerConfig, **kwargs): - super().__init__() - dim_inner = layer_config.dim_out \ - if layer_config.dim_inner is None \ - else layer_config.dim_inner - for i in range(layer_config.num_layers): - d_in = layer_config.dim_in if i == 0 else dim_inner - d_out = layer_config.dim_out \ - if i == layer_config.num_layers - 1 else dim_inner - has_act = layer_config.final_act \ - if i == layer_config.num_layers - 1 else True - inter_layer_config = copy.deepcopy(layer_config) - inter_layer_config.dim_in = d_in - inter_layer_config.dim_out = d_out - inter_layer_config.has_act = has_act - layer = GeneralLayer(name, inter_layer_config, **kwargs) - self.add_module('Layer_{}'.format(i), layer) - - def forward(self, batch): - for layer in self.children(): - batch = layer(batch) - return batch - - -# ---------- Core basic layers. 
Input: batch; Output: batch ----------------- # - - -@register_layer('linear') -class Linear(nn.Module): - """ - Basic Linear layer. - - Args: - dim_in (int): Input dimension - dim_out (int): Output dimension - bias (bool): Whether has bias term - **kwargs (optional): Additional args - """ - def __init__(self, layer_config: LayerConfig, **kwargs): - super().__init__() - self.model = Linear_pyg(layer_config.dim_in, layer_config.dim_out, - bias=layer_config.has_bias) - - def forward(self, batch): - if isinstance(batch, torch.Tensor): - batch = self.model(batch) - else: - batch.x = self.model(batch.x) - return batch - - -class BatchNorm1dNode(nn.Module): - """ - BatchNorm for node feature. - - Args: - dim_in (int): Input dimension - """ - def __init__(self, layer_config: LayerConfig): - super().__init__() - self.bn = nn.BatchNorm1d(layer_config.dim_in, eps=layer_config.bn_eps, - momentum=layer_config.bn_mom) - - def forward(self, batch): - batch.x = self.bn(batch.x) - return batch - - -class BatchNorm1dEdge(nn.Module): - """ - BatchNorm for edge feature. - - Args: - dim_in (int): Input dimension - """ - def __init__(self, layer_config: LayerConfig): - super().__init__() - self.bn = nn.BatchNorm1d(layer_config.dim_in, eps=layer_config.bn_eps, - momentum=layer_config.bn_mom) - - def forward(self, batch): - batch.edge_attr = self.bn(batch.edge_attr) - return batch - - -@register_layer('mlp') -class MLP(nn.Module): - """ - Basic MLP model. - Here 1-layer MLP is equivalent to a Liner layer. - - Args: - dim_in (int): Input dimension - dim_out (int): Output dimension - bias (bool): Whether has bias term - dim_inner (int): The dimension for the inner layers - num_layers (int): Number of layers in the stack - **kwargs (optional): Additional args - """ - def __init__(self, layer_config: LayerConfig, **kwargs): - super().__init__() - dim_inner = layer_config.dim_in \ - if layer_config.dim_inner is None \ - else layer_config.dim_inner - layer_config.has_bias = True - layers = [] - if layer_config.num_layers > 1: - sub_layer_config = LayerConfig( - num_layers=layer_config.num_layers - 1, - dim_in=layer_config.dim_in, dim_out=dim_inner, - dim_inner=dim_inner, final_act=True) - layers.append(GeneralMultiLayer('linear', sub_layer_config)) - layer_config = replace(layer_config, dim_in=dim_inner) - layers.append(Linear(layer_config)) - else: - layers.append(Linear(layer_config)) - self.model = nn.Sequential(*layers) - - def forward(self, batch): - if isinstance(batch, torch.Tensor): - batch = self.model(batch) - else: - batch.x = self.model(batch.x) - return batch - - -@register_layer('gcnconv') -class GCNConv(nn.Module): - """ - Graph Convolutional Network (GCN) layer - """ - def __init__(self, layer_config: LayerConfig, **kwargs): - super().__init__() - self.model = pyg.nn.GCNConv(layer_config.dim_in, layer_config.dim_out, - bias=layer_config.has_bias) - - def forward(self, batch): - batch.x = self.model(batch.x, batch.edge_index) - return batch - - -@register_layer('sageconv') -class SAGEConv(nn.Module): - """ - GraphSAGE Conv layer - """ - def __init__(self, layer_config: LayerConfig, **kwargs): - super().__init__() - self.model = pyg.nn.SAGEConv(layer_config.dim_in, layer_config.dim_out, - bias=layer_config.has_bias) - - def forward(self, batch): - batch.x = self.model(batch.x, batch.edge_index) - return batch - - -@register_layer('gatconv') -class GATConv(nn.Module): - """ - Graph Attention Network (GAT) layer - """ - def __init__(self, layer_config: LayerConfig, **kwargs): - super().__init__() - 
self.model = pyg.nn.GATConv(layer_config.dim_in, layer_config.dim_out, - bias=layer_config.has_bias) - - def forward(self, batch): - batch.x = self.model(batch.x, batch.edge_index) - return batch - - -@register_layer('ginconv') -class GINConv(nn.Module): - """ - Graph Isomorphism Network (GIN) layer - """ - def __init__(self, layer_config: LayerConfig, **kwargs): - super().__init__() - gin_nn = nn.Sequential( - Linear_pyg(layer_config.dim_in, layer_config.dim_out), nn.ReLU(), - Linear_pyg(layer_config.dim_out, layer_config.dim_out)) - self.model = pyg.nn.GINConv(gin_nn) - - def forward(self, batch): - batch.x = self.model(batch.x, batch.edge_index) - return batch - - -@register_layer('splineconv') -class SplineConv(nn.Module): - """ - SplineCNN layer - """ - def __init__(self, layer_config: LayerConfig, **kwargs): - super().__init__() - self.model = pyg.nn.SplineConv(layer_config.dim_in, - layer_config.dim_out, dim=1, - kernel_size=2, - bias=layer_config.has_bias) - - def forward(self, batch): - batch.x = self.model(batch.x, batch.edge_index, batch.edge_attr) - return batch - - -@register_layer('generalconv') -class GeneralConv(nn.Module): - """A general GNN layer""" - def __init__(self, layer_config: LayerConfig, **kwargs): - super().__init__() - self.model = GeneralConvLayer(layer_config.dim_in, - layer_config.dim_out, - bias=layer_config.has_bias) - - def forward(self, batch): - batch.x = self.model(batch.x, batch.edge_index) - return batch - - -@register_layer('generaledgeconv') -class GeneralEdgeConv(nn.Module): - """A general GNN layer that supports edge features as well""" - def __init__(self, layer_config: LayerConfig, **kwargs): - super().__init__() - self.model = GeneralEdgeConvLayer(layer_config.dim_in, - layer_config.dim_out, - layer_config.edge_dim, - bias=layer_config.has_bias) - - def forward(self, batch): - batch.x = self.model(batch.x, batch.edge_index, - edge_feature=batch.edge_attr) - return batch - - -@register_layer('generalsampleedgeconv') -class GeneralSampleEdgeConv(nn.Module): - """A general GNN layer that supports edge features and edge sampling""" - def __init__(self, layer_config: LayerConfig, **kwargs): - super().__init__() - self.model = GeneralEdgeConvLayer(layer_config.dim_in, - layer_config.dim_out, - layer_config.edge_dim, - bias=layer_config.has_bias) - self.keep_edge = layer_config.keep_edge - - def forward(self, batch): - edge_mask = torch.rand(batch.edge_index.shape[1]) < self.keep_edge - edge_index = batch.edge_index[:, edge_mask] - edge_feature = batch.edge_attr[edge_mask, :] - batch.x = self.model(batch.x, edge_index, edge_feature=edge_feature) - return batch diff --git a/pytorch_geometric-2.3.1/torch_geometric/graphgym/train.py b/pytorch_geometric-2.3.1/torch_geometric/graphgym/train.py deleted file mode 100644 index f04a7a0..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/graphgym/train.py +++ /dev/null @@ -1,56 +0,0 @@ -import warnings -from typing import Optional - -import torch -from torch.utils.data import DataLoader - -from torch_geometric.data.lightning.datamodule import LightningDataModule -from torch_geometric.graphgym import create_loader -from torch_geometric.graphgym.checkpoint import get_ckpt_dir -from torch_geometric.graphgym.config import cfg -from torch_geometric.graphgym.imports import pl -from torch_geometric.graphgym.logger import LoggerCallback -from torch_geometric.graphgym.model_builder import GraphGymModule - - -class GraphGymDataModule(LightningDataModule): - def __init__(self): - self.loaders = create_loader() - 
super().__init__(has_val=True, has_test=True) - - def train_dataloader(self) -> DataLoader: - return self.loaders[0] - - def val_dataloader(self) -> DataLoader: - # better way would be to test after fit. - # First call trainer.fit(...) then trainer.test(...) - return self.loaders[1] - - def test_dataloader(self) -> DataLoader: - return self.loaders[2] - - -def train(model: GraphGymModule, datamodule, logger: bool = True, - trainer_config: Optional[dict] = None): - warnings.filterwarnings('ignore', '.*use `CSVLogger` as the default.*') - - callbacks = [] - if logger: - callbacks.append(LoggerCallback()) - if cfg.train.enable_ckpt: - ckpt_cbk = pl.callbacks.ModelCheckpoint(dirpath=get_ckpt_dir()) - callbacks.append(ckpt_cbk) - - trainer_config = trainer_config or {} - trainer = pl.Trainer( - **trainer_config, - enable_checkpointing=cfg.train.enable_ckpt, - callbacks=callbacks, - default_root_dir=cfg.out_dir, - max_epochs=cfg.optim.max_epoch, - accelerator=cfg.accelerator, - devices='auto' if not torch.cuda.is_available() else cfg.devices, - ) - - trainer.fit(model, datamodule=datamodule) - trainer.test(model, datamodule=datamodule) diff --git a/pytorch_geometric-2.3.1/torch_geometric/io/npz.py b/pytorch_geometric-2.3.1/torch_geometric/io/npz.py deleted file mode 100644 index 075b919..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/io/npz.py +++ /dev/null @@ -1,30 +0,0 @@ -import numpy as np -import scipy.sparse as sp -import torch - -from torch_geometric.data import Data -from torch_geometric.utils import remove_self_loops, to_undirected - - -def read_npz(path): - with np.load(path) as f: - return parse_npz(f) - - -def parse_npz(f): - x = sp.csr_matrix((f['attr_data'], f['attr_indices'], f['attr_indptr']), - f['attr_shape']).todense() - x = torch.from_numpy(x).to(torch.float) - x[x > 0] = 1 - - adj = sp.csr_matrix((f['adj_data'], f['adj_indices'], f['adj_indptr']), - f['adj_shape']).tocoo() - row = torch.from_numpy(adj.row).to(torch.long) - col = torch.from_numpy(adj.col).to(torch.long) - edge_index = torch.stack([row, col], dim=0) - edge_index, _ = remove_self_loops(edge_index) - edge_index = to_undirected(edge_index, num_nodes=x.size(0)) - - y = torch.from_numpy(f['labels']).to(torch.long) - - return Data(x=x, edge_index=edge_index, y=y) diff --git a/pytorch_geometric-2.3.1/torch_geometric/io/planetoid.py b/pytorch_geometric-2.3.1/torch_geometric/io/planetoid.py deleted file mode 100644 index ba76766..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/io/planetoid.py +++ /dev/null @@ -1,122 +0,0 @@ -import os.path as osp -import sys -import warnings -from itertools import repeat - -import torch - -from torch_geometric.data import Data -from torch_geometric.io import read_txt_array -from torch_geometric.typing import SparseTensor -from torch_geometric.utils import coalesce, index_to_mask, remove_self_loops - -try: - import cPickle as pickle -except ImportError: - import pickle - - -def read_planetoid_data(folder, prefix): - names = ['x', 'tx', 'allx', 'y', 'ty', 'ally', 'graph', 'test.index'] - items = [read_file(folder, prefix, name) for name in names] - x, tx, allx, y, ty, ally, graph, test_index = items - train_index = torch.arange(y.size(0), dtype=torch.long) - val_index = torch.arange(y.size(0), y.size(0) + 500, dtype=torch.long) - sorted_test_index = test_index.sort()[0] - - if prefix.lower() == 'citeseer': - # There are some isolated nodes in the Citeseer graph, resulting in - # none consecutive test indices. 
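`read_npz` above is the parser behind the Amazon/Coauthor-style `.npz` datasets: it binarizes the sparse attribute matrix and returns an undirected, self-loop-free graph. A usage sketch (the file path is a placeholder):

```
from torch_geometric.io import read_npz

data = read_npz('raw/amazon_electronics_photo.npz')  # placeholder path
print(data.num_nodes, data.num_edges)
```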
We need to identify them and add them - # as zero vectors to `tx` and `ty`. - len_test_indices = (test_index.max() - test_index.min()).item() + 1 - - tx_ext = torch.zeros(len_test_indices, tx.size(1)) - tx_ext[sorted_test_index - test_index.min(), :] = tx - ty_ext = torch.zeros(len_test_indices, ty.size(1)) - ty_ext[sorted_test_index - test_index.min(), :] = ty - - tx, ty = tx_ext, ty_ext - - if prefix.lower() == 'nell.0.001': - tx_ext = torch.zeros(len(graph) - allx.size(0), x.size(1)) - tx_ext[sorted_test_index - allx.size(0)] = tx - - ty_ext = torch.zeros(len(graph) - ally.size(0), y.size(1)) - ty_ext[sorted_test_index - ally.size(0)] = ty - - tx, ty = tx_ext, ty_ext - - x = torch.cat([allx, tx], dim=0) - x[test_index] = x[sorted_test_index] - - # Creating feature vectors for relations. - row, col, value = SparseTensor.from_dense(x).coo() - rows, cols, values = [row], [col], [value] - - mask1 = index_to_mask(test_index, size=len(graph)) - mask2 = index_to_mask(torch.arange(allx.size(0), len(graph)), - size=len(graph)) - mask = ~mask1 | ~mask2 - isolated_index = mask.nonzero(as_tuple=False).view(-1)[allx.size(0):] - - rows += [isolated_index] - cols += [torch.arange(isolated_index.size(0)) + x.size(1)] - values += [torch.ones(isolated_index.size(0))] - - x = SparseTensor(row=torch.cat(rows), col=torch.cat(cols), - value=torch.cat(values)) - else: - x = torch.cat([allx, tx], dim=0) - x[test_index] = x[sorted_test_index] - - y = torch.cat([ally, ty], dim=0).max(dim=1)[1] - y[test_index] = y[sorted_test_index] - - train_mask = index_to_mask(train_index, size=y.size(0)) - val_mask = index_to_mask(val_index, size=y.size(0)) - test_mask = index_to_mask(test_index, size=y.size(0)) - - edge_index = edge_index_from_dict(graph, num_nodes=y.size(0)) - - data = Data(x=x, edge_index=edge_index, y=y) - data.train_mask = train_mask - data.val_mask = val_mask - data.test_mask = test_mask - - return data - - -def read_file(folder, prefix, name): - path = osp.join(folder, f'ind.{prefix.lower()}.{name}') - - if name == 'test.index': - return read_txt_array(path, dtype=torch.long) - - with open(path, 'rb') as f: - if sys.version_info > (3, 0): - warnings.filterwarnings('ignore', '.*`scipy.sparse.csr` name.*') - out = pickle.load(f, encoding='latin1') - else: - out = pickle.load(f) - - if name == 'graph': - return out - - out = out.todense() if hasattr(out, 'todense') else out - out = torch.from_numpy(out).to(torch.float) - return out - - -def edge_index_from_dict(graph_dict, num_nodes=None): - row, col = [], [] - for key, value in graph_dict.items(): - row += repeat(key, len(value)) - col += value - edge_index = torch.stack([torch.tensor(row), torch.tensor(col)], dim=0) - - # NOTE: There are some duplicated edges and self loops in the datasets. - # Other implementations do not remove them! 
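`read_planetoid_data` above can also be called directly, outside the `Planetoid` dataset class. A sketch, assuming the eight `ind.cora.*` files listed in `raw_file_names` have already been downloaded into `raw/`:

```
from torch_geometric.io import read_planetoid_data

data = read_planetoid_data('raw', 'cora')  # folder holding the ind.cora.* files
print(data)                                # Data with x, edge_index, y and masks
print(data.train_mask.sum().item())        # 140 labelled training nodes on Cora
```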
- edge_index, _ = remove_self_loops(edge_index) - edge_index = coalesce(edge_index, num_nodes=num_nodes) - - return edge_index diff --git a/pytorch_geometric-2.3.1/torch_geometric/loader/__init__.py b/pytorch_geometric-2.3.1/torch_geometric/loader/__init__.py deleted file mode 100644 index 600d348..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/loader/__init__.py +++ /dev/null @@ -1,51 +0,0 @@ -from torch_geometric.deprecation import deprecated - -from .dataloader import DataLoader -from .node_loader import NodeLoader -from .link_loader import LinkLoader -from .neighbor_loader import NeighborLoader -from .link_neighbor_loader import LinkNeighborLoader -from .hgt_loader import HGTLoader -from .cluster import ClusterData, ClusterLoader -from .graph_saint import (GraphSAINTSampler, GraphSAINTNodeSampler, - GraphSAINTEdgeSampler, GraphSAINTRandomWalkSampler) -from .shadow import ShaDowKHopSampler -from .random_node_loader import RandomNodeLoader -from .zip_loader import ZipLoader -from .data_list_loader import DataListLoader -from .dense_data_loader import DenseDataLoader -from .temporal_dataloader import TemporalDataLoader -from .neighbor_sampler import NeighborSampler -from .imbalanced_sampler import ImbalancedSampler -from .dynamic_batch_sampler import DynamicBatchSampler -from .mixin import AffinityMixin - -__all__ = classes = [ - 'DataLoader', - 'NodeLoader', - 'LinkLoader', - 'NeighborLoader', - 'LinkNeighborLoader', - 'HGTLoader', - 'ClusterData', - 'ClusterLoader', - 'GraphSAINTSampler', - 'GraphSAINTNodeSampler', - 'GraphSAINTEdgeSampler', - 'GraphSAINTRandomWalkSampler', - 'ShaDowKHopSampler', - 'RandomNodeLoader', - 'ZipLoader', - 'DataListLoader', - 'DenseDataLoader', - 'TemporalDataLoader', - 'NeighborSampler', - 'ImbalancedSampler', - 'DynamicBatchSampler', - 'AffinityMixin', -] - -RandomNodeSampler = deprecated( - details="use 'loader.RandomNodeLoader' instead", - func_name='loader.RandomNodeSampler', -)(RandomNodeLoader) diff --git a/pytorch_geometric-2.3.1/torch_geometric/loader/cluster.py b/pytorch_geometric-2.3.1/torch_geometric/loader/cluster.py deleted file mode 100644 index 75aa537..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/loader/cluster.py +++ /dev/null @@ -1,172 +0,0 @@ -import copy -import os.path as osp -import sys -from typing import Optional - -import torch -import torch.utils.data - -from torch_geometric.typing import SparseTensor, torch_sparse -from torch_geometric.utils import narrow, select - - -class ClusterData(torch.utils.data.Dataset): - r"""Clusters/partitions a graph data object into multiple subgraphs, as - motivated by the `"Cluster-GCN: An Efficient Algorithm for Training Deep - and Large Graph Convolutional Networks" - `_ paper. - - .. note:: - The underlying METIS algorithm requires undirected graphs as input. - - Args: - data (torch_geometric.data.Data): The graph data object. - num_parts (int): The number of partitions. - recursive (bool, optional): If set to :obj:`True`, will use multilevel - recursive bisection instead of multilevel k-way partitioning. - (default: :obj:`False`) - save_dir (str, optional): If set, will save the partitioned data to the - :obj:`save_dir` directory for faster re-use. (default: :obj:`None`) - log (bool, optional): If set to :obj:`False`, will not log any - progress. 
(default: :obj:`True`) - """ - def __init__(self, data, num_parts: int, recursive: bool = False, - save_dir: Optional[str] = None, log: bool = True): - - assert data.edge_index is not None - - self.num_parts = num_parts - - recursive_str = '_recursive' if recursive else '' - filename = f'partition_{num_parts}{recursive_str}.pt' - path = osp.join(save_dir or '', filename) - if save_dir is not None and osp.exists(path): - adj, partptr, perm = torch.load(path) - else: - if log: # pragma: no cover - print('Computing METIS partitioning...', file=sys.stderr) - - N, E = data.num_nodes, data.num_edges - adj = SparseTensor( - row=data.edge_index[0], col=data.edge_index[1], - value=torch.arange(E, device=data.edge_index.device), - sparse_sizes=(N, N)) - adj, partptr, perm = adj.partition(num_parts, recursive) - - if save_dir is not None: - torch.save((adj, partptr, perm), path) - - if log: # pragma: no cover - print('Done!', file=sys.stderr) - - self.data = self._permute_data(data, perm, adj) - self.partptr = partptr - self.perm = perm - - def _permute_data(self, data, node_idx, adj): - out = copy.copy(data) - for key, value in data.items(): - if data.is_node_attr(key): - cat_dim = data.__cat_dim__(key, value) - out[key] = select(value, node_idx, dim=cat_dim) - - out.edge_index = None - out.adj = adj - - return out - - def __len__(self): - return self.partptr.numel() - 1 - - def __getitem__(self, idx): - start = int(self.partptr[idx]) - length = int(self.partptr[idx + 1]) - start - - data = copy.copy(self.data) - adj, data.adj = data.adj, None - - adj = adj.narrow(0, start, length).narrow(1, start, length) - edge_idx = adj.storage.value() - - for key, value in data: - if key == 'num_nodes': - data.num_nodes = length - elif self.data.is_node_attr(key): - cat_dim = self.data.__cat_dim__(key, value) - data[key] = narrow(value, cat_dim, start, length) - elif self.data.is_edge_attr(key): - cat_dim = self.data.__cat_dim__(key, value) - data[key] = select(value, edge_idx, dim=cat_dim) - - row, col, _ = adj.coo() - data.edge_index = torch.stack([row, col], dim=0) - - return data - - def __repr__(self): - return (f'{self.__class__.__name__}(\n' - f' data={self.data},\n' - f' num_parts={self.num_parts}\n' - f')') - - -class ClusterLoader(torch.utils.data.DataLoader): - r"""The data loader scheme from the `"Cluster-GCN: An Efficient Algorithm - for Training Deep and Large Graph Convolutional Networks" - `_ paper which merges partioned subgraphs - and their between-cluster links from a large-scale graph data object to - form a mini-batch. - - .. note:: - - Use :class:`~torch_geometric.loader.ClusterData` and - :class:`~torch_geometric.loader.ClusterLoader` in conjunction to - form mini-batches of clusters. - For an example of using Cluster-GCN, see - `examples/cluster_gcn_reddit.py `_ or - `examples/cluster_gcn_ppi.py `_. - - Args: - cluster_data (torch_geometric.loader.ClusterData): The already - partioned data object. - **kwargs (optional): Additional arguments of - :class:`torch.utils.data.DataLoader`, such as :obj:`batch_size`, - :obj:`shuffle`, :obj:`drop_last` or :obj:`num_workers`. 
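Putting `ClusterData` and `ClusterLoader` together, as the note above suggests, looks roughly like this. It assumes a METIS-enabled `torch-sparse` build, and the dataset root is a placeholder:

```
from torch_geometric.datasets import Planetoid
from torch_geometric.loader import ClusterData, ClusterLoader

dataset = Planetoid('data/Planetoid', name='Cora')    # placeholder root
cluster_data = ClusterData(dataset[0], num_parts=50)  # METIS partitioning
loader = ClusterLoader(cluster_data, batch_size=10, shuffle=True)

for batch in loader:        # 10 clusters merged per mini-batch,
    print(batch.num_nodes)  # including their between-cluster edges
```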
- """ - def __init__(self, cluster_data, **kwargs): - self.cluster_data = cluster_data - - super().__init__(range(len(cluster_data)), collate_fn=self._collate, - **kwargs) - - def _collate(self, batch): - if not isinstance(batch, torch.Tensor): - batch = torch.tensor(batch) - - start = self.cluster_data.partptr[batch].tolist() - end = self.cluster_data.partptr[batch + 1].tolist() - node_idx = torch.cat([torch.arange(s, e) for s, e in zip(start, end)]) - - data = copy.copy(self.cluster_data.data) - - adj, data.adj = self.cluster_data.data.adj, None - adj = torch_sparse.cat( - [adj.narrow(0, s, e - s) for s, e in zip(start, end)], dim=0) - adj = adj.index_select(1, node_idx) - row, col, edge_idx = adj.coo() - - for key, value in data: - if key == 'num_nodes': - data.num_nodes = node_idx.numel() - elif self.cluster_data.data.is_node_attr(key): - cat_dim = self.cluster_data.data.__cat_dim__(key, value) - data[key] = select(value, node_idx, dim=cat_dim) - elif self.cluster_data.data.is_edge_attr(key): - cat_dim = self.cluster_data.data.__cat_dim__(key, value) - data[key] = select(value, edge_idx, dim=cat_dim) - - data.edge_index = torch.stack([row, col], dim=0) - - return data diff --git a/pytorch_geometric-2.3.1/torch_geometric/loader/dataloader.py b/pytorch_geometric-2.3.1/torch_geometric/loader/dataloader.py deleted file mode 100644 index f6c9e5b..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/loader/dataloader.py +++ /dev/null @@ -1,85 +0,0 @@ -from collections.abc import Mapping -from typing import List, Optional, Sequence, Union - -import torch.utils.data -from torch.utils.data.dataloader import default_collate - -from torch_geometric.data import Batch, Dataset -from torch_geometric.data.data import BaseData -from torch_geometric.data.datapipes import DatasetAdapter - - -class Collater: - def __init__(self, follow_batch, exclude_keys): - self.follow_batch = follow_batch - self.exclude_keys = exclude_keys - - def __call__(self, batch): - elem = batch[0] - if isinstance(elem, BaseData): - return Batch.from_data_list(batch, self.follow_batch, - self.exclude_keys) - elif isinstance(elem, torch.Tensor): - return default_collate(batch) - elif isinstance(elem, float): - return torch.tensor(batch, dtype=torch.float) - elif isinstance(elem, int): - return torch.tensor(batch) - elif isinstance(elem, str): - return batch - elif isinstance(elem, Mapping): - return {key: self([data[key] for data in batch]) for key in elem} - elif isinstance(elem, tuple) and hasattr(elem, '_fields'): - return type(elem)(*(self(s) for s in zip(*batch))) - elif isinstance(elem, Sequence) and not isinstance(elem, str): - return [self(s) for s in zip(*batch)] - - raise TypeError(f'DataLoader found invalid type: {type(elem)}') - - def collate(self, batch): # pragma: no cover - # TODO Deprecated, remove soon. - return self(batch) - - -class DataLoader(torch.utils.data.DataLoader): - r"""A data loader which merges data objects from a - :class:`torch_geometric.data.Dataset` to a mini-batch. - Data objects can be either of type :class:`~torch_geometric.data.Data` or - :class:`~torch_geometric.data.HeteroData`. - - Args: - dataset (Dataset): The dataset from which to load the data. - batch_size (int, optional): How many samples per batch to load. - (default: :obj:`1`) - shuffle (bool, optional): If set to :obj:`True`, the data will be - reshuffled at every epoch. (default: :obj:`False`) - follow_batch (List[str], optional): Creates assignment batch - vectors for each key in the list. 
(default: :obj:`None`) - exclude_keys (List[str], optional): Will exclude each key in the - list. (default: :obj:`None`) - **kwargs (optional): Additional arguments of - :class:`torch.utils.data.DataLoader`. - """ - def __init__( - self, - dataset: Union[Dataset, Sequence[BaseData], DatasetAdapter], - batch_size: int = 1, - shuffle: bool = False, - follow_batch: Optional[List[str]] = None, - exclude_keys: Optional[List[str]] = None, - **kwargs, - ): - # Remove for PyTorch Lightning: - kwargs.pop('collate_fn', None) - - # Save for PyTorch Lightning < 1.6: - self.follow_batch = follow_batch - self.exclude_keys = exclude_keys - - super().__init__( - dataset, - batch_size, - shuffle, - collate_fn=Collater(follow_batch, exclude_keys), - **kwargs, - ) diff --git a/pytorch_geometric-2.3.1/torch_geometric/loader/dynamic_batch_sampler.py b/pytorch_geometric-2.3.1/torch_geometric/loader/dynamic_batch_sampler.py deleted file mode 100644 index 0622706..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/loader/dynamic_batch_sampler.py +++ /dev/null @@ -1,106 +0,0 @@ -import warnings -from typing import Iterator, List, Optional - -import torch - -from torch_geometric.data import Dataset - - -class DynamicBatchSampler(torch.utils.data.sampler.Sampler): - r"""Dynamically adds samples to a mini-batch up to a maximum size (either - based on number of nodes or number of edges). When data samples have a - wide range in sizes, specifying a mini-batch size in terms of number of - samples is not ideal and can cause CUDA OOM errors. - - Within the :class:`DynamicBatchSampler`, the number of steps per epoch is - ambiguous, depending on the order of the samples. By default the - :meth:`__len__` will be undefined. This is fine for most cases but - progress bars will be infinite. Alternatively, :obj:`num_steps` can be - supplied to cap the number of mini-batches produced by the sampler. - - .. code-block:: python - - from torch_geometric.loader import DataLoader, DynamicBatchSampler - - sampler = DynamicBatchSampler(dataset, max_num=10000, mode="node") - loader = DataLoader(dataset, batch_sampler=sampler, ...) - - Args: - dataset (Dataset): Dataset to sample from. - max_num (int): Size of mini-batch to aim for in number of nodes or - edges. - mode (str, optional): :obj:`"node"` or :obj:`"edge"` to measure - batch size. (default: :obj:`"node"`) - shuffle (bool, optional): If set to :obj:`True`, will have the data - reshuffled at every epoch. (default: :obj:`False`) - skip_too_big (bool, optional): If set to :obj:`True`, skip samples - which cannot fit in a batch by itself. (default: :obj:`False`) - num_steps (int, optional): The number of mini-batches to draw for a - single epoch. If set to :obj:`None`, will iterate through all the - underlying examples, but :meth:`__len__` will be :obj:`None` since - it is be ambiguous. 
(default: :obj:`None`) - """ - def __init__(self, dataset: Dataset, max_num: int, mode: str = 'node', - shuffle: bool = False, skip_too_big: bool = False, - num_steps: Optional[int] = None): - if not isinstance(max_num, int) or max_num <= 0: - raise ValueError("`max_num` should be a positive integer value " - "(got {max_num}).") - if mode not in ['node', 'edge']: - raise ValueError("`mode` choice should be either " - f"'node' or 'edge' (got '{mode}').") - - if num_steps is None: - num_steps = len(dataset) - - self.dataset = dataset - self.max_num = max_num - self.mode = mode - self.shuffle = shuffle - self.skip_too_big = skip_too_big - self.num_steps = num_steps - - def __iter__(self) -> Iterator[List[int]]: - batch = [] - batch_n = 0 - num_steps = 0 - num_processed = 0 - - if self.shuffle: - indices = torch.randperm(len(self.dataset), dtype=torch.long) - else: - indices = torch.arange(len(self.dataset), dtype=torch.long) - - while (num_processed < len(self.dataset) - and num_steps < self.num_steps): - # Fill batch - for idx in indices[num_processed:]: - # Size of sample - data = self.dataset[idx] - n = data.num_nodes if self.mode == 'node' else data.num_edges - - if batch_n + n > self.max_num: - if batch_n == 0: - if self.skip_too_big: - continue - else: - warnings.warn("Size of data sample at index " - f"{idx} is larger than " - f"{self.max_num} {self.mode}s " - f"(got {n} {self.mode}s.") - else: - # Mini-batch filled - break - - # Add sample to current batch - batch.append(idx.item()) - num_processed += 1 - batch_n += n - - yield batch - batch = [] - batch_n = 0 - num_steps += 1 - - def __len__(self) -> int: - return self.num_steps diff --git a/pytorch_geometric-2.3.1/torch_geometric/loader/neighbor_loader.py b/pytorch_geometric-2.3.1/torch_geometric/loader/neighbor_loader.py deleted file mode 100644 index 3b3fe0a..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/loader/neighbor_loader.py +++ /dev/null @@ -1,219 +0,0 @@ -from typing import Callable, Dict, List, Optional, Tuple, Union - -from torch_geometric.data import Data, FeatureStore, GraphStore, HeteroData -from torch_geometric.loader.node_loader import NodeLoader -from torch_geometric.sampler import NeighborSampler -from torch_geometric.typing import EdgeType, InputNodes, OptTensor - - -class NeighborLoader(NodeLoader): - r"""A data loader that performs neighbor sampling as introduced in the - `"Inductive Representation Learning on Large Graphs" - `_ paper. - This loader allows for mini-batch training of GNNs on large-scale graphs - where full-batch training is not feasible. - - More specifically, :obj:`num_neighbors` denotes how much neighbors are - sampled for each node in each iteration. - :class:`~torch_geometric.loader.NeighborLoader` takes in this list of - :obj:`num_neighbors` and iteratively samples :obj:`num_neighbors[i]` for - each node involved in iteration :obj:`i - 1`. - - Sampled nodes are sorted based on the order in which they were sampled. - In particular, the first :obj:`batch_size` nodes represent the set of - original mini-batch nodes. - - .. 
code-block:: python - - from torch_geometric.datasets import Planetoid - from torch_geometric.loader import NeighborLoader - - data = Planetoid(path, name='Cora')[0] - - loader = NeighborLoader( - data, - # Sample 30 neighbors for each node for 2 iterations - num_neighbors=[30] * 2, - # Use a batch size of 128 for sampling training nodes - batch_size=128, - input_nodes=data.train_mask, - ) - - sampled_data = next(iter(loader)) - print(sampled_data.batch_size) - >>> 128 - - By default, the data loader will only include the edges that were - originally sampled (:obj:`directed = True`). - This option should only be used in case the number of hops is equivalent to - the number of GNN layers. - In case the number of GNN layers is greater than the number of hops, - consider setting :obj:`directed = False`, which will include all edges - between all sampled nodes (but is slightly slower as a result). - - Furthermore, :class:`~torch_geometric.loader.NeighborLoader` works for both - **homogeneous** graphs stored via :class:`~torch_geometric.data.Data` as - well as **heterogeneous** graphs stored via - :class:`~torch_geometric.data.HeteroData`. - When operating in heterogeneous graphs, up to :obj:`num_neighbors` - neighbors will be sampled for each :obj:`edge_type`. - However, more fine-grained control over - the amount of sampled neighbors of individual edge types is possible: - - .. code-block:: python - - from torch_geometric.datasets import OGB_MAG - from torch_geometric.loader import NeighborLoader - - hetero_data = OGB_MAG(path)[0] - - loader = NeighborLoader( - hetero_data, - # Sample 30 neighbors for each node and edge type for 2 iterations - num_neighbors={key: [30] * 2 for key in hetero_data.edge_types}, - # Use a batch size of 128 for sampling training nodes of type paper - batch_size=128, - input_nodes=('paper', hetero_data['paper'].train_mask), - ) - - sampled_hetero_data = next(iter(loader)) - print(sampled_hetero_data['paper'].batch_size) - >>> 128 - - .. note:: - - For an example of using - :class:`~torch_geometric.loader.NeighborLoader`, see - `examples/hetero/to_hetero_mag.py `_. - - The :class:`~torch_geometric.loader.NeighborLoader` will return subgraphs - where global node indices are mapped to local indices corresponding to this - specific subgraph. However, often times it is desired to map the nodes of - the current subgraph back to the global node indices. The - :class:`~torch_geometric.loader.NeighborLoader` will include this mapping - as part of the :obj:`data` object: - - .. code-block:: python - - loader = NeighborLoader(data, ...) - sampled_data = next(iter(loader)) - print(sampled_data.n_id) # Global node index of each node in batch. - - Args: - data (Any): A :class:`~torch_geometric.data.Data`, - :class:`~torch_geometric.data.HeteroData`, or - (:class:`~torch_geometric.data.FeatureStore`, - :class:`~torch_geometric.data.GraphStore`) data object. - num_neighbors (List[int] or Dict[Tuple[str, str, str], List[int]]): The - number of neighbors to sample for each node in each iteration. - If an entry is set to :obj:`-1`, all neighbors will be included. - In heterogeneous graphs, may also take in a dictionary denoting - the amount of neighbors to sample for each individual edge type. - input_nodes (torch.Tensor or str or Tuple[str, torch.Tensor]): The - indices of nodes for which neighbors are sampled to create - mini-batches. - Needs to be either given as a :obj:`torch.LongTensor` or - :obj:`torch.BoolTensor`. - If set to :obj:`None`, all nodes will be considered. 
- In heterogeneous graphs, needs to be passed as a tuple that holds - the node type and node indices. (default: :obj:`None`) - input_time (torch.Tensor, optional): Optional values to override the - timestamp for the input nodes given in :obj:`input_nodes`. If not - set, will use the timestamps in :obj:`time_attr` as default (if - present). The :obj:`time_attr` needs to be set for this to work. - (default: :obj:`None`) - replace (bool, optional): If set to :obj:`True`, will sample with - replacement. (default: :obj:`False`) - directed (bool, optional): If set to :obj:`False`, will include all - edges between all sampled nodes. (default: :obj:`True`) - disjoint (bool, optional): If set to :obj: `True`, each seed node will - create its own disjoint subgraph. - If set to :obj:`True`, mini-batch outputs will have a :obj:`batch` - vector holding the mapping of nodes to their respective subgraph. - Will get automatically set to :obj:`True` in case of temporal - sampling. (default: :obj:`False`) - temporal_strategy (str, optional): The sampling strategy when using - temporal sampling (:obj:`"uniform"`, :obj:`"last"`). - If set to :obj:`"uniform"`, will sample uniformly across neighbors - that fulfill temporal constraints. - If set to :obj:`"last"`, will sample the last `num_neighbors` that - fulfill temporal constraints. - (default: :obj:`"uniform"`) - time_attr (str, optional): The name of the attribute that denotes - timestamps for the nodes in the graph. - If set, temporal sampling will be used such that neighbors are - guaranteed to fulfill temporal constraints, *i.e.* neighbors have - an earlier or equal timestamp than the center node. - (default: :obj:`None`) - transform (callable, optional): A function/transform that takes in - a sampled mini-batch and returns a transformed version. - (default: :obj:`None`) - transform_sampler_output (callable, optional): A function/transform - that takes in a :class:`torch_geometric.sampler.SamplerOutput` and - returns a transformed version. (default: :obj:`None`) - is_sorted (bool, optional): If set to :obj:`True`, assumes that - :obj:`edge_index` is sorted by column. - If :obj:`time_attr` is set, additionally requires that rows are - sorted according to time within individual neighborhoods. - This avoids internal re-sorting of the data and can improve - runtime and memory efficiency. (default: :obj:`False`) - filter_per_worker (bool, optional): If set to :obj:`True`, will filter - the returning data in each worker's subprocess rather than in the - main process. - Setting this to :obj:`True` for in-memory datasets is generally not - recommended: - (1) it may result in too many open file handles, - (2) it may slown down data loading, - (3) it requires operating on CPU tensors. - (default: :obj:`False`) - **kwargs (optional): Additional arguments of - :class:`torch.utils.data.DataLoader`, such as :obj:`batch_size`, - :obj:`shuffle`, :obj:`drop_last` or :obj:`num_workers`. 
- """ - def __init__( - self, - data: Union[Data, HeteroData, Tuple[FeatureStore, GraphStore]], - num_neighbors: Union[List[int], Dict[EdgeType, List[int]]], - input_nodes: InputNodes = None, - input_time: OptTensor = None, - replace: bool = False, - directed: bool = True, - disjoint: bool = False, - temporal_strategy: str = 'uniform', - time_attr: Optional[str] = None, - transform: Optional[Callable] = None, - transform_sampler_output: Optional[Callable] = None, - is_sorted: bool = False, - filter_per_worker: bool = False, - neighbor_sampler: Optional[NeighborSampler] = None, - **kwargs, - ): - if input_time is not None and time_attr is None: - raise ValueError("Received conflicting 'input_time' and " - "'time_attr' arguments: 'input_time' is set " - "while 'time_attr' is not set.") - - if neighbor_sampler is None: - neighbor_sampler = NeighborSampler( - data, - num_neighbors=num_neighbors, - replace=replace, - directed=directed, - disjoint=disjoint, - temporal_strategy=temporal_strategy, - time_attr=time_attr, - is_sorted=is_sorted, - share_memory=kwargs.get('num_workers', 0) > 0, - ) - - super().__init__( - data=data, - node_sampler=neighbor_sampler, - input_nodes=input_nodes, - input_time=input_time, - transform=transform, - transform_sampler_output=transform_sampler_output, - filter_per_worker=filter_per_worker, - **kwargs, - ) diff --git a/pytorch_geometric-2.3.1/torch_geometric/loader/temporal_dataloader.py b/pytorch_geometric-2.3.1/torch_geometric/loader/temporal_dataloader.py deleted file mode 100644 index d679579..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/loader/temporal_dataloader.py +++ /dev/null @@ -1,38 +0,0 @@ -from typing import List - -import torch - -from torch_geometric.data import TemporalData - - -class TemporalDataLoader(torch.utils.data.DataLoader): - r"""A data loader which merges succesive events of a - :class:`torch_geometric.data.TemporalData` to a mini-batch. - - Args: - data (TemporalData): The :obj:`~torch_geometric.data.TemporalData` - from which to load the data. - batch_size (int, optional): How many samples per batch to load. - (default: :obj:`1`) - **kwargs (optional): Additional arguments of - :class:`torch.utils.data.DataLoader`. 
- """ - def __init__(self, data: TemporalData, batch_size: int = 1, **kwargs): - # Remove for PyTorch Lightning: - kwargs.pop('dataset', None) - kwargs.pop('collate_fn', None) - - kwargs.pop('shuffle', None) - - self.data = data - self.events_per_batch = batch_size - - if kwargs.get('drop_last', False) and len(data) % batch_size != 0: - arange = range(0, len(data) - batch_size, batch_size) - else: - arange = range(0, len(data), batch_size) - - super().__init__(arange, 1, shuffle=False, collate_fn=self, **kwargs) - - def __call__(self, arange: List[int]) -> TemporalData: - return self.data[arange[0]:arange[0] + self.events_per_batch] diff --git a/pytorch_geometric-2.3.1/torch_geometric/loader/utils.py b/pytorch_geometric-2.3.1/torch_geometric/loader/utils.py deleted file mode 100644 index 6402e68..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/loader/utils.py +++ /dev/null @@ -1,303 +0,0 @@ -import copy -import math -from collections.abc import Sequence -from typing import Dict, Optional, Tuple, Union - -import numpy as np -import torch -from torch import Tensor - -from torch_geometric.data import ( - Data, - FeatureStore, - GraphStore, - HeteroData, - TensorAttr, - remote_backend_utils, -) -from torch_geometric.data.storage import EdgeStorage, NodeStorage -from torch_geometric.typing import ( - FeatureTensorType, - InputEdges, - InputNodes, - OptTensor, - SparseTensor, -) - - -def index_select(value: FeatureTensorType, index: Tensor, - dim: int = 0) -> Tensor: - - # PyTorch currently only supports indexing via `torch.int64` :( - index = index.to(torch.int64) - - if isinstance(value, Tensor): - out: Optional[Tensor] = None - if torch.utils.data.get_worker_info() is not None: - # If we are in a background process, we write directly into a - # shared memory tensor to avoid an extra copy: - size = list(value.shape) - size[dim] = index.numel() - numel = math.prod(size) - storage = value.storage()._new_shared(numel) - out = value.new(storage).view(size) - - return torch.index_select(value, dim, index, out=out) - - elif isinstance(value, np.ndarray): - return torch.from_numpy(np.take(value, index, axis=dim)) - - raise ValueError(f"Encountered invalid feature tensor type " - f"(got '{type(value)}')") - - -def filter_node_store_(store: NodeStorage, out_store: NodeStorage, - index: Tensor): - # Filters a node storage object to only hold the nodes in `index`: - for key, value in store.items(): - if key == 'num_nodes': - out_store.num_nodes = index.numel() - - elif store.is_node_attr(key): - if isinstance(value, Tensor): - index = index.to(value.device) - elif isinstance(value, np.ndarray): - index = index.cpu() - dim = store._parent().__cat_dim__(key, value, store) - out_store[key] = index_select(value, index, dim=dim) - - -def filter_edge_store_(store: EdgeStorage, out_store: EdgeStorage, row: Tensor, - col: Tensor, index: Tensor, perm: OptTensor = None): - # Filters a edge storage object to only hold the edges in `index`, - # which represents the new graph as denoted by `(row, col)`: - for key, value in store.items(): - if key == 'edge_index': - edge_index = torch.stack([row, col], dim=0) - out_store.edge_index = edge_index.to(value.device) - - elif key == 'adj_t': - # NOTE: We expect `(row, col)` to be sorted by `col` (CSC layout). 
- row = row.to(value.device()) - col = col.to(value.device()) - edge_attr = value.storage.value() - if edge_attr is not None: - index = index.to(edge_attr.device) - edge_attr = index_select(edge_attr, index, dim=0) - sparse_sizes = out_store.size()[::-1] - # TODO Currently, we set `is_sorted=False`, see: - # https://github.com/pyg-team/pytorch_geometric/issues/4346 - out_store.adj_t = SparseTensor(row=col, col=row, value=edge_attr, - sparse_sizes=sparse_sizes, - is_sorted=False, trust_data=True) - - elif store.is_edge_attr(key): - dim = store._parent().__cat_dim__(key, value, store) - if isinstance(value, Tensor): - index = index.to(value.device) - elif isinstance(value, np.ndarray): - index = index.cpu() - if perm is None: - out_store[key] = index_select(value, index, dim=dim) - else: - if isinstance(value, Tensor): - perm = perm.to(value.device) - elif isinstance(value, np.ndarray): - perm = perm.cpu() - out_store[key] = index_select( - value, - perm[index.to(torch.int64)], - dim=dim, - ) - - -def filter_data(data: Data, node: Tensor, row: Tensor, col: Tensor, - edge: Tensor, perm: OptTensor = None) -> Data: - # Filters a data object to only hold nodes in `node` and edges in `edge`: - out = copy.copy(data) - filter_node_store_(data._store, out._store, node) - filter_edge_store_(data._store, out._store, row, col, edge, perm) - return out - - -def filter_hetero_data( - data: HeteroData, - node_dict: Dict[str, Tensor], - row_dict: Dict[str, Tensor], - col_dict: Dict[str, Tensor], - edge_dict: Dict[str, Tensor], - perm_dict: Optional[Dict[str, OptTensor]] = None, -) -> HeteroData: - # Filters a heterogeneous data object to only hold nodes in `node` and - # edges in `edge` for each node and edge type, respectively: - out = copy.copy(data) - - for node_type in data.node_types: - filter_node_store_(data[node_type], out[node_type], - node_dict[node_type]) - - for edge_type in data.edge_types: - filter_edge_store_( - data[edge_type], - out[edge_type], - row_dict[edge_type], - col_dict[edge_type], - edge_dict[edge_type], - perm_dict[edge_type] if perm_dict else None, - ) - - return out - - -def filter_custom_store( - feature_store: FeatureStore, - graph_store: GraphStore, - node_dict: Dict[str, Tensor], - row_dict: Dict[str, Tensor], - col_dict: Dict[str, Tensor], - edge_dict: Dict[str, Tensor], - custom_cls: Optional[HeteroData] = None, -) -> HeteroData: - r"""Constructs a `HeteroData` object from a feature store that only holds - nodes in `node` end edges in `edge` for each node and edge type, - respectively.""" - - # Construct a new `HeteroData` object: - data = custom_cls() if custom_cls is not None else HeteroData() - - # Filter edge storage: - # TODO support edge attributes - for attr in graph_store.get_all_edge_attrs(): - key = attr.edge_type - if key in row_dict and key in col_dict: - edge_index = torch.stack([row_dict[key], col_dict[key]], dim=0) - data[attr.edge_type].edge_index = edge_index - - # Filter node storage: - required_attrs = [] - for attr in feature_store.get_all_tensor_attrs(): - if attr.group_name in node_dict: - attr.index = node_dict[attr.group_name] - required_attrs.append(attr) - data[attr.group_name].num_nodes = attr.index.size(0) - - # NOTE Here, we utilize `feature_store.multi_get` to give the feature store - # full control over optimizing how it returns features (since the call is - # synchronous, this amounts to giving the feature store control over all - # iteration). 
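A toy illustration of the homogeneous `filter_data` path defined above (hypothetical tensors: `node` holds the kept global node ids, `edge` the kept global edge positions, and `(row, col)` the already relabelled local connectivity):

```
import torch
from torch_geometric.data import Data
from torch_geometric.loader.utils import filter_data  # module shown above

data = Data(
    x=torch.randn(6, 8),
    edge_index=torch.tensor([[0, 1, 2, 3, 4], [1, 2, 3, 4, 5]]),
)

node = torch.tensor([1, 2, 3])  # keep these global nodes
edge = torch.tensor([1, 2])     # keep global edges 1->2 and 2->3
row = torch.tensor([0, 1])      # local source ids after relabelling
col = torch.tensor([1, 2])      # local target ids after relabelling

sub = filter_data(data, node, row, col, edge)
print(sub.num_nodes, sub.edge_index.tolist())  # 3 [[0, 1], [1, 2]]
```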
- tensors = feature_store.multi_get_tensor(required_attrs) - for i, attr in enumerate(required_attrs): - data[attr.group_name][attr.attr_name] = tensors[i] - - return data - - -# Input Utilities ############################################################# - - -def get_input_nodes( - data: Union[Data, HeteroData, Tuple[FeatureStore, GraphStore]], - input_nodes: Union[InputNodes, TensorAttr], -) -> Tuple[Optional[str], Sequence]: - def to_index(tensor): - if isinstance(tensor, Tensor) and tensor.dtype == torch.bool: - return tensor.nonzero(as_tuple=False).view(-1) - if not isinstance(tensor, Tensor): - return torch.tensor(tensor, dtype=torch.long) - return tensor - - if isinstance(data, Data): - if input_nodes is None: - return None, torch.arange(data.num_nodes) - return None, to_index(input_nodes) - - elif isinstance(data, HeteroData): - assert input_nodes is not None - - if isinstance(input_nodes, str): - return input_nodes, torch.arange(data[input_nodes].num_nodes) - - assert isinstance(input_nodes, (list, tuple)) - assert len(input_nodes) == 2 - assert isinstance(input_nodes[0], str) - - node_type, input_nodes = input_nodes - if input_nodes is None: - return node_type, torch.arange(data[node_type].num_nodes) - return node_type, to_index(input_nodes) - - else: # Tuple[FeatureStore, GraphStore] - feature_store, graph_store = data - assert input_nodes is not None - - if isinstance(input_nodes, Tensor): - return None, to_index(input_nodes) - - if isinstance(input_nodes, str): - return input_nodes, torch.arange( - remote_backend_utils.num_nodes(feature_store, graph_store, - input_nodes)) - - if isinstance(input_nodes, (list, tuple)): - assert len(input_nodes) == 2 - assert isinstance(input_nodes[0], str) - - node_type, input_nodes = input_nodes - if input_nodes is None: - return node_type, torch.arange( - remote_backend_utils.num_nodes(feature_store, graph_store, - input_nodes)) - return node_type, to_index(input_nodes) - - -def get_edge_label_index( - data: Union[Data, HeteroData, Tuple[FeatureStore, GraphStore]], - edge_label_index: InputEdges, -) -> Tuple[Optional[str], Tensor]: - edge_type = None - if isinstance(data, Data): - if edge_label_index is None: - return None, data.edge_index - return None, edge_label_index - - assert edge_label_index is not None - assert isinstance(edge_label_index, (list, tuple)) - - if isinstance(data, HeteroData): - if isinstance(edge_label_index[0], str): - edge_type = edge_label_index - edge_type = data._to_canonical(*edge_type) - assert edge_type in data.edge_types - return edge_type, data[edge_type].edge_index - - assert len(edge_label_index) == 2 - - edge_type, edge_label_index = edge_label_index - edge_type = data._to_canonical(*edge_type) - - if edge_label_index is None: - return edge_type, data[edge_type].edge_index - - return edge_type, edge_label_index - - else: # Tuple[FeatureStore, GraphStore] - _, graph_store = data - - # Need the edge index in COO for LinkNeighborLoader: - def _get_edge_index(edge_type): - row_dict, col_dict, _ = graph_store.coo([edge_type]) - row = list(row_dict.values())[0] - col = list(col_dict.values())[0] - return torch.stack((row, col), dim=0) - - if isinstance(edge_label_index[0], str): - edge_type = edge_label_index - return edge_type, _get_edge_index(edge_type) - - assert len(edge_label_index) == 2 - edge_type, edge_label_index = edge_label_index - - if edge_label_index is None: - return edge_type, _get_edge_index(edge_type) - - return edge_type, edge_label_index diff --git 
a/pytorch_geometric-2.3.1/torch_geometric/loader/zip_loader.py b/pytorch_geometric-2.3.1/torch_geometric/loader/zip_loader.py
deleted file mode 100644
index cfdf29c..0000000
--- a/pytorch_geometric-2.3.1/torch_geometric/loader/zip_loader.py
+++ /dev/null
@@ -1,76 +0,0 @@
-from typing import Any, Iterator, List, Tuple, Union
-
-import torch
-from torch import Tensor
-
-from torch_geometric.data import Data, HeteroData
-from torch_geometric.loader import LinkLoader, NodeLoader
-from torch_geometric.loader.base import DataLoaderIterator
-
-
-class ZipLoader(torch.utils.data.DataLoader):
-    r"""A loader that returns a tuple of data objects by sampling from
-    multiple :class:`NodeLoader` or :class:`LinkLoader` instances.
-
-    Args:
-        loaders (List[NodeLoader] or List[LinkLoader]): The loader instances.
-        filter_per_worker (bool, optional): If set to :obj:`True`, will filter
-            the returned data in each worker's subprocess rather than in the
-            main process.
-            Setting this to :obj:`True` for in-memory datasets is generally
-            not recommended:
-            (1) it may result in too many open file handles,
-            (2) it may slow down data loading,
-            (3) it requires operating on CPU tensors.
-            (default: :obj:`False`)
-        **kwargs (optional): Additional arguments of
-            :class:`torch.utils.data.DataLoader`, such as :obj:`batch_size`,
-            :obj:`shuffle`, :obj:`drop_last` or :obj:`num_workers`.
-    """
-    def __init__(
-        self,
-        loaders: Union[List[NodeLoader], List[LinkLoader]],
-        filter_per_worker: bool = False,
-        **kwargs,
-    ):
-        # Remove for PyTorch Lightning:
-        kwargs.pop('dataset', None)
-        kwargs.pop('collate_fn', None)
-
-        for loader in loaders:
-            if not callable(getattr(loader, 'collate_fn', None)):
-                raise ValueError(f"'{loader.__class__.__name__}' does not "
-                                 f"have a 'collate_fn' method")
-            if not callable(getattr(loader, 'filter_fn', None)):
-                raise ValueError(f"'{loader.__class__.__name__}' does not "
-                                 f"have a 'filter_fn' method")
-            loader.filter_per_worker = filter_per_worker
-
-        iterator = range(min([len(loader.dataset) for loader in loaders]))
-        super().__init__(iterator, collate_fn=self.collate_fn, **kwargs)
-
-        self.loaders = loaders
-        self.filter_per_worker = filter_per_worker
-
-    def collate_fn(self, index: List[int]) -> Tuple[Any, ...]:
-        if not isinstance(index, Tensor):
-            index = torch.tensor(index, dtype=torch.long)
-
-        return tuple(loader.collate_fn(index) for loader in self.loaders)
-
-    def filter_fn(
-        self,
-        outs: Tuple[Any, ...],
-    ) -> Tuple[Union[Data, HeteroData], ...]:
-        loaders = self.loaders
-        return tuple(loader.filter_fn(v) for loader, v in zip(loaders, outs))
-
-    def _get_iterator(self) -> Iterator:
-        if self.filter_per_worker:
-            return super()._get_iterator()
-
-        # Execute `filter_fn` in the main process:
-        return DataLoaderIterator(super()._get_iterator(), self.filter_fn)
-
-    def __repr__(self) -> str:
-        return f'{self.__class__.__name__}(loaders={self.loaders})'
diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/__init__.py b/pytorch_geometric-2.3.1/torch_geometric/nn/__init__.py
deleted file mode 100644
index fb6c56b..0000000
--- a/pytorch_geometric-2.3.1/torch_geometric/nn/__init__.py
+++ /dev/null
@@ -1,33 +0,0 @@
-from .reshape import Reshape
-from .sequential import Sequential
-from .data_parallel import DataParallel
-from .to_hetero_transformer import to_hetero
-from .to_hetero_with_bases_transformer import to_hetero_with_bases
-from .to_fixed_size_transformer import to_fixed_size
-from .encoding import PositionalEncoding, TemporalEncoding
-from .model_hub import
PyGModelHubMixin -from .summary import summary - -from .aggr import * # noqa -from .conv import * # noqa -from .pool import * # noqa -from .glob import * # noqa -from .norm import * # noqa -from .unpool import * # noqa -from .dense import * # noqa -from .kge import * # noqa -from .models import * # noqa -from .functional import * # noqa - -__all__ = [ - 'Reshape', - 'Sequential', - 'DataParallel', - 'to_hetero', - 'to_hetero_with_bases', - 'to_fixed_size', - 'PositionalEncoding', - 'TemporalEncoding', - 'PyGModelHubMixin', - 'summary', -] diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/aggr/__init__.py b/pytorch_geometric-2.3.1/torch_geometric/nn/aggr/__init__.py deleted file mode 100644 index bbb6708..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/aggr/__init__.py +++ /dev/null @@ -1,52 +0,0 @@ -from .base import Aggregation -from .multi import MultiAggregation -from .basic import ( - MeanAggregation, - SumAggregation, - MaxAggregation, - MinAggregation, - MulAggregation, - VarAggregation, - StdAggregation, - SoftmaxAggregation, - PowerMeanAggregation, -) -from .quantile import MedianAggregation, QuantileAggregation -from .lstm import LSTMAggregation -from .gru import GRUAggregation -from .set2set import Set2Set -from .scaler import DegreeScalerAggregation -from .equilibrium import EquilibriumAggregation -from .sort import SortAggregation -from .gmt import GraphMultisetTransformer -from .attention import AttentionalAggregation -from .mlp import MLPAggregation -from .deep_sets import DeepSetsAggregation -from .set_transformer import SetTransformerAggregation - -__all__ = classes = [ - 'Aggregation', - 'MultiAggregation', - 'SumAggregation', - 'MeanAggregation', - 'MaxAggregation', - 'MinAggregation', - 'MulAggregation', - 'VarAggregation', - 'StdAggregation', - 'SoftmaxAggregation', - 'PowerMeanAggregation', - 'MedianAggregation', - 'QuantileAggregation', - 'LSTMAggregation', - 'GRUAggregation', - 'Set2Set', - 'DegreeScalerAggregation', - 'SortAggregation', - 'GraphMultisetTransformer', - 'AttentionalAggregation', - 'EquilibriumAggregation', - 'MLPAggregation', - 'DeepSetsAggregation', - 'SetTransformerAggregation', -] diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/aggr/base.py b/pytorch_geometric-2.3.1/torch_geometric/nn/aggr/base.py deleted file mode 100644 index 160caed..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/aggr/base.py +++ /dev/null @@ -1,188 +0,0 @@ -from typing import Optional, Tuple - -import torch -from torch import Tensor - -from torch_geometric.utils import scatter, segment, to_dense_batch - - -class Aggregation(torch.nn.Module): - r"""An abstract base class for implementing custom aggregations. - - Aggregation can be either performed via an :obj:`index` vector, which - defines the mapping from input elements to their location in the output: - - | - - .. image:: https://raw.githubusercontent.com/rusty1s/pytorch_scatter/ - master/docs/source/_figures/add.svg?sanitize=true - :align: center - :width: 400px - - | - - Notably, :obj:`index` does not have to be sorted (for most aggregation - operators): - - .. code-block:: - - # Feature matrix holding 10 elements with 64 features each: - x = torch.randn(10, 64) - - # Assign each element to one of three sets: - index = torch.tensor([0, 0, 1, 0, 2, 0, 2, 1, 0, 2]) - - output = aggr(x, index) # Output shape: [3, 64] - - Alternatively, aggregation can be achieved via a "compressed" index vector - called :obj:`ptr`. 
Here, elements within the same set need to be grouped
-    together in the input, and :obj:`ptr` defines their boundaries:
-
-    .. code-block::
-
-        # Feature matrix holding 10 elements with 64 features each:
-        x = torch.randn(10, 64)
-
-        # Define the boundary indices for three sets:
-        ptr = torch.tensor([0, 4, 7, 10])
-
-        output = aggr(x, ptr=ptr)  # Output shape: [3, 64]
-
-    Note that at least one of :obj:`index` or :obj:`ptr` must be defined.
-
-    Shapes:
-        - **input:**
-          node features :math:`(*, |\mathcal{V}|, F_{in})` or edge features
-          :math:`(*, |\mathcal{E}|, F_{in})`,
-          index vector :math:`(|\mathcal{V}|)` or :math:`(|\mathcal{E}|)`,
-        - **output:** graph features :math:`(*, |\mathcal{G}|, F_{out})` or
-          node features :math:`(*, |\mathcal{V}|, F_{out})`
-    """
-    def forward(self, x: Tensor, index: Optional[Tensor] = None,
-                ptr: Optional[Tensor] = None, dim_size: Optional[int] = None,
-                dim: int = -2) -> Tensor:
-        r"""
-        Args:
-            x (torch.Tensor): The source tensor.
-            index (torch.Tensor, optional): The indices of elements for
-                applying the aggregation.
-                One of :obj:`index` or :obj:`ptr` must be defined.
-                (default: :obj:`None`)
-            ptr (torch.Tensor, optional): If given, computes the aggregation
-                based on sorted inputs in CSR representation.
-                One of :obj:`index` or :obj:`ptr` must be defined.
-                (default: :obj:`None`)
-            dim_size (int, optional): The size of the output tensor at
-                dimension :obj:`dim` after aggregation. (default: :obj:`None`)
-            dim (int, optional): The dimension in which to aggregate.
-                (default: :obj:`-2`)
-        """
-        pass
-
-    def reset_parameters(self):
-        r"""Resets all learnable parameters of the module."""
-        pass
-
-    def __call__(self, x: Tensor, index: Optional[Tensor] = None,
-                 ptr: Optional[Tensor] = None, dim_size: Optional[int] = None,
-                 dim: int = -2, **kwargs) -> Tensor:
-
-        if dim >= x.dim() or dim < -x.dim():
-            raise ValueError(f"Encountered invalid dimension '{dim}' of "
-                             f"source tensor with {x.dim()} dimensions")
-
-        if index is None and ptr is None:
-            index = x.new_zeros(x.size(dim), dtype=torch.long)
-
-        if ptr is not None:
-            if dim_size is None:
-                dim_size = ptr.numel() - 1
-            elif dim_size != ptr.numel() - 1:
-                raise ValueError(f"Encountered invalid 'dim_size' (got "
-                                 f"'{dim_size}' but expected "
-                                 f"'{ptr.numel() - 1}')")
-
-        if index is not None and dim_size is None:
-            dim_size = int(index.max()) + 1 if index.numel() > 0 else 0
-
-        try:
-            return super().__call__(x, index, ptr, dim_size, dim, **kwargs)
-        except (IndexError, RuntimeError) as e:
-            if index is not None:
-                if index.numel() > 0 and dim_size <= int(index.max()):
-                    raise ValueError(f"Encountered invalid 'dim_size' (got "
-                                     f"'{dim_size}' but expected "
-                                     f">= '{int(index.max()) + 1}')")
-            raise e
-
-    def __repr__(self) -> str:
-        return f'{self.__class__.__name__}()'
-
-    # Assertions ##############################################################
-
-    def assert_index_present(self, index: Optional[Tensor]):
-        # TODO Currently, not all aggregators support `ptr`.
This assert helps - # to ensure that we require `index` to be passed to the computation: - if index is None: - raise NotImplementedError( - "Aggregation requires 'index' to be specified") - - def assert_sorted_index(self, index: Optional[Tensor]): - if index is not None and not torch.all(index[:-1] <= index[1:]): - raise ValueError("Can not perform aggregation since the 'index' " - "tensor is not sorted") - - def assert_two_dimensional_input(self, x: Tensor, dim: int): - if x.dim() != 2: - raise ValueError(f"Aggregation requires two-dimensional inputs " - f"(got '{x.dim()}')") - - if dim not in [-2, 0]: - raise ValueError(f"Aggregation needs to perform aggregation in " - f"first dimension (got '{dim}')") - - # Helper methods ########################################################## - - def reduce(self, x: Tensor, index: Optional[Tensor] = None, - ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, - dim: int = -2, reduce: str = 'sum') -> Tensor: - - if ptr is not None: - ptr = expand_left(ptr, dim, dims=x.dim()) - return segment(x, ptr, reduce=reduce) - - assert index is not None - return scatter(x, index, dim, dim_size, reduce) - - def to_dense_batch( - self, - x: Tensor, - index: Optional[Tensor] = None, - ptr: Optional[Tensor] = None, - dim_size: Optional[int] = None, - dim: int = -2, - fill_value: float = 0., - max_num_elements: Optional[int] = None, - ) -> Tuple[Tensor, Tensor]: - - # TODO Currently, `to_dense_batch` can only operate on `index`: - self.assert_index_present(index) - self.assert_sorted_index(index) - self.assert_two_dimensional_input(x, dim) - - return to_dense_batch( - x, - index, - batch_size=dim_size, - fill_value=fill_value, - max_num_nodes=max_num_elements, - ) - - -############################################################################### - - -def expand_left(ptr: Tensor, dim: int, dims: int) -> Tensor: - for _ in range(dims + dim if dim < 0 else dim): - ptr = ptr.unsqueeze(0) - return ptr diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/aggr/gru.py b/pytorch_geometric-2.3.1/torch_geometric/nn/aggr/gru.py deleted file mode 100644 index 59d8917..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/aggr/gru.py +++ /dev/null @@ -1,40 +0,0 @@ -from typing import Optional - -from torch import Tensor -from torch.nn import GRU - -from torch_geometric.nn.aggr import Aggregation - - -class GRUAggregation(Aggregation): - r"""Performs GRU aggregation in which the elements to aggregate are - interpreted as a sequence, as described in the `"Graph Neural Networks - with Adaptive Readouts" `_ paper. - - .. warning:: - :class:`GRUAggregation` is not a permutation-invariant operator. - - Args: - in_channels (int): Size of each input sample. - out_channels (int): Size of each output sample. - **kwargs (optional): Additional arguments of :class:`torch.nn.GRU`. 
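For instance, a small sketch of using this aggregation (note that the dense conversion above requires the `index` vector to be sorted by group):

```
import torch
from torch_geometric.nn.aggr import GRUAggregation

aggr = GRUAggregation(in_channels=64, out_channels=32)

x = torch.randn(10, 64)
index = torch.tensor([0, 0, 0, 0, 1, 1, 1, 2, 2, 2])  # sorted by group

out = aggr(x, index)  # each group is read as a sequence -> shape [3, 32]
```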
- """ - def __init__(self, in_channels: int, out_channels: int, **kwargs): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.gru = GRU(in_channels, out_channels, batch_first=True, **kwargs) - self.reset_parameters() - - def reset_parameters(self): - self.gru.reset_parameters() - - def forward(self, x: Tensor, index: Optional[Tensor] = None, - ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, - dim: int = -2) -> Tensor: - x, _ = self.to_dense_batch(x, index, ptr, dim_size, dim) - return self.gru(x)[0][:, -1] - - def __repr__(self) -> str: - return (f'{self.__class__.__name__}({self.in_channels}, ' - f'{self.out_channels})') diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/aggr/lstm.py b/pytorch_geometric-2.3.1/torch_geometric/nn/aggr/lstm.py deleted file mode 100644 index f784b95..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/aggr/lstm.py +++ /dev/null @@ -1,40 +0,0 @@ -from typing import Optional - -from torch import Tensor -from torch.nn import LSTM - -from torch_geometric.nn.aggr import Aggregation - - -class LSTMAggregation(Aggregation): - r"""Performs LSTM-style aggregation in which the elements to aggregate are - interpreted as a sequence, as described in the `"Inductive Representation - Learning on Large Graphs" `_ paper. - - .. warning:: - :class:`LSTMAggregation` is not a permutation-invariant operator. - - Args: - in_channels (int): Size of each input sample. - out_channels (int): Size of each output sample. - **kwargs (optional): Additional arguments of :class:`torch.nn.LSTM`. - """ - def __init__(self, in_channels: int, out_channels: int, **kwargs): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.lstm = LSTM(in_channels, out_channels, batch_first=True, **kwargs) - self.reset_parameters() - - def reset_parameters(self): - self.lstm.reset_parameters() - - def forward(self, x: Tensor, index: Optional[Tensor] = None, - ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, - dim: int = -2) -> Tensor: - x, _ = self.to_dense_batch(x, index, ptr, dim_size, dim) - return self.lstm(x)[0][:, -1] - - def __repr__(self) -> str: - return (f'{self.__class__.__name__}({self.in_channels}, ' - f'{self.out_channels})') diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/aggr/mlp.py b/pytorch_geometric-2.3.1/torch_geometric/nn/aggr/mlp.py deleted file mode 100644 index bfbd34c..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/aggr/mlp.py +++ /dev/null @@ -1,52 +0,0 @@ -from typing import Optional - -from torch import Tensor - -from torch_geometric.nn.aggr import Aggregation - - -class MLPAggregation(Aggregation): - r"""Performs MLP aggregation in which the elements to aggregate are - flattened into a single vectorial representation, and are then processed by - a Multi-Layer Perceptron (MLP), as described in the `"Graph Neural Networks - with Adaptive Readouts" `_ paper. - - .. warning:: - :class:`MLPAggregation` is not a permutation-invariant operator. - - Args: - in_channels (int): Size of each input sample. - out_channels (int): Size of each output sample. - max_num_elements (int): The maximum number of elements to aggregate per - group. - **kwargs (optional): Additional arguments of - :class:`torch_geometric.nn.models.MLP`. 
- """ - def __init__(self, in_channels: int, out_channels: int, - max_num_elements: int, **kwargs): - super().__init__() - - self.in_channels = in_channels - self.out_channels = out_channels - self.max_num_elements = max_num_elements - - from torch_geometric.nn import MLP - self.mlp = MLP(in_channels=in_channels * max_num_elements, - out_channels=out_channels, **kwargs) - - self.reset_parameters() - - def reset_parameters(self): - self.mlp.reset_parameters() - - def forward(self, x: Tensor, index: Optional[Tensor] = None, - ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, - dim: int = -2) -> Tensor: - x, _ = self.to_dense_batch(x, index, ptr, dim_size, dim, - max_num_elements=self.max_num_elements) - return self.mlp(x.view(-1, x.size(1) * x.size(2))) - - def __repr__(self) -> str: - return (f'{self.__class__.__name__}({self.in_channels}, ' - f'{self.out_channels}, ' - f'max_num_elements={self.max_num_elements})') diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/aggr/sort.py b/pytorch_geometric-2.3.1/torch_geometric/nn/aggr/sort.py deleted file mode 100644 index 05299d4..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/aggr/sort.py +++ /dev/null @@ -1,52 +0,0 @@ -from typing import Optional - -import torch -from torch import Tensor - -from torch_geometric.nn.aggr import Aggregation - - -class SortAggregation(Aggregation): - r"""The pooling operator from the `"An End-to-End Deep Learning - Architecture for Graph Classification" - `_ paper, - where node features are sorted in descending order based on their last - feature channel. The first :math:`k` nodes form the output of the layer. - - Args: - k (int): The number of nodes to hold for each graph. - """ - def __init__(self, k: int): - super().__init__() - self.k = k - - def forward(self, x: Tensor, index: Optional[Tensor] = None, - ptr: Optional[Tensor] = None, dim_size: Optional[int] = None, - dim: int = -2) -> Tensor: - - fill_value = x.min().item() - 1 - batch_x, _ = self.to_dense_batch(x, index, ptr, dim_size, dim, - fill_value=fill_value) - B, N, D = batch_x.size() - - _, perm = batch_x[:, :, -1].sort(dim=-1, descending=True) - arange = torch.arange(B, dtype=torch.long, device=perm.device) * N - perm = perm + arange.view(-1, 1) - - batch_x = batch_x.view(B * N, D) - batch_x = batch_x[perm] - batch_x = batch_x.view(B, N, D) - - if N >= self.k: - batch_x = batch_x[:, :self.k].contiguous() - else: - expand_batch_x = batch_x.new_full((B, self.k - N, D), fill_value) - batch_x = torch.cat([batch_x, expand_batch_x], dim=1) - - batch_x[batch_x == fill_value] = 0 - x = batch_x.view(B, self.k * D) - - return x - - def __repr__(self) -> str: - return (f'{self.__class__.__name__}(k={self.k})') diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/aggr/utils.py b/pytorch_geometric-2.3.1/torch_geometric/nn/aggr/utils.py deleted file mode 100644 index 81c1326..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/aggr/utils.py +++ /dev/null @@ -1,219 +0,0 @@ -from typing import Optional - -import torch -from torch import Tensor -from torch.nn import LayerNorm, Linear, MultiheadAttention, Parameter - - -class MultiheadAttentionBlock(torch.nn.Module): - r"""The Multihead Attention Block (MAB) from the `"Set Transformer: A - Framework for Attention-based Permutation-Invariant Neural Networks" - `_ paper - - .. 
math::

-        \mathrm{MAB}(\mathbf{x}, \mathbf{y}) &= \mathrm{LayerNorm}(\mathbf{h} +
-        \mathbf{W} \mathbf{h})
-
-        \mathbf{h} &= \mathrm{LayerNorm}(\mathbf{x} +
-        \mathrm{Multihead}(\mathbf{x}, \mathbf{y}, \mathbf{y}))
-
-    Args:
-        channels (int): Size of each input sample.
-        heads (int, optional): Number of multi-head-attentions.
-            (default: :obj:`1`)
-        layer_norm (bool, optional): If set to :obj:`False`, will not apply
-            layer normalization. (default: :obj:`True`)
-        dropout (float, optional): Dropout probability of attention weights.
-            (default: :obj:`0`)
-    """
-    def __init__(self, channels: int, heads: int = 1, layer_norm: bool = True,
-                 dropout: float = 0.0):
-        super().__init__()
-
-        self.channels = channels
-        self.heads = heads
-        self.dropout = dropout
-
-        self.attn = MultiheadAttention(
-            channels,
-            heads,
-            batch_first=True,
-            dropout=dropout,
-        )
-        self.lin = Linear(channels, channels)
-        self.layer_norm1 = LayerNorm(channels) if layer_norm else None
-        self.layer_norm2 = LayerNorm(channels) if layer_norm else None
-
-    def reset_parameters(self):
-        self.attn._reset_parameters()
-        self.lin.reset_parameters()
-        if self.layer_norm1 is not None:
-            self.layer_norm1.reset_parameters()
-        if self.layer_norm2 is not None:
-            self.layer_norm2.reset_parameters()
-
-    def forward(self, x: Tensor, y: Tensor, x_mask: Optional[Tensor] = None,
-                y_mask: Optional[Tensor] = None) -> Tensor:
-        """"""
-        if y_mask is not None:
-            y_mask = ~y_mask
-
-        out, _ = self.attn(x, y, y, y_mask, need_weights=False)
-
-        if x_mask is not None:
-            out[~x_mask] = 0.
-
-        out = out + x
-
-        if self.layer_norm1 is not None:
-            out = self.layer_norm1(out)
-
-        out = out + self.lin(out).relu()
-
-        if self.layer_norm2 is not None:
-            out = self.layer_norm2(out)
-
-        return out
-
-    def __repr__(self) -> str:
-        return (f'{self.__class__.__name__}({self.channels}, '
-                f'heads={self.heads}, '
-                f'layer_norm={self.layer_norm1 is not None}, '
-                f'dropout={self.dropout})')
-
-
-class SetAttentionBlock(torch.nn.Module):
-    r"""The Set Attention Block (SAB) from the `"Set Transformer: A
-    Framework for Attention-based Permutation-Invariant Neural Networks"
-    `_ paper
-
-    .. math::
-
-        \mathrm{SAB}(\mathbf{X}) = \mathrm{MAB}(\mathbf{x}, \mathbf{x})
-
-    Args:
-        channels (int): Size of each input sample.
-        heads (int, optional): Number of multi-head-attentions.
-            (default: :obj:`1`)
-        layer_norm (bool, optional): If set to :obj:`False`, will not apply
-            layer normalization. (default: :obj:`True`)
-        dropout (float, optional): Dropout probability of attention weights.
-            (default: :obj:`0`)
-    """
-    def __init__(self, channels: int, heads: int = 1, layer_norm: bool = True,
-                 dropout: float = 0.0):
-        super().__init__()
-        self.mab = MultiheadAttentionBlock(channels, heads, layer_norm,
-                                           dropout)
-
-    def reset_parameters(self):
-        self.mab.reset_parameters()
-
-    def forward(self, x: Tensor, mask: Optional[Tensor] = None) -> Tensor:
-        return self.mab(x, x, mask, mask)
-
-    def __repr__(self) -> str:
-        return (f'{self.__class__.__name__}({self.mab.channels}, '
-                f'heads={self.mab.heads}, '
-                f'layer_norm={self.mab.layer_norm1 is not None}, '
-                f'dropout={self.mab.dropout})')
-
-
-class InducedSetAttentionBlock(torch.nn.Module):
-    r"""The Induced Set Attention Block (ISAB) from the `"Set Transformer: A
-    Framework for Attention-based Permutation-Invariant Neural Networks"
-    `_ paper
-
-    .. math::
-
-        \mathrm{ISAB}(\mathbf{X}) &= \mathrm{MAB}(\mathbf{x}, \mathbf{h})
-
-        \mathbf{h} &= \mathrm{MAB}(\mathbf{I}, \mathbf{x})
-
-    where :math:`\mathbf{I}` denotes :obj:`num_induced_points` learnable
-    vectors.
-
-    Args:
-        channels (int): Size of each input sample.
-        num_induced_points (int): Number of induced points.
-        heads (int, optional): Number of multi-head-attentions.
-            (default: :obj:`1`)
-        layer_norm (bool, optional): If set to :obj:`False`, will not apply
-            layer normalization. (default: :obj:`True`)
-        dropout (float, optional): Dropout probability of attention weights.
-            (default: :obj:`0`)
-    """
-    def __init__(self, channels: int, num_induced_points: int, heads: int = 1,
-                 layer_norm: bool = True, dropout: float = 0.0):
-        super().__init__()
-        self.ind = Parameter(torch.Tensor(1, num_induced_points, channels))
-        self.mab1 = MultiheadAttentionBlock(channels, heads, layer_norm,
-                                            dropout)
-        self.mab2 = MultiheadAttentionBlock(channels, heads, layer_norm,
-                                            dropout)
-        self.reset_parameters()
-
-    def reset_parameters(self):
-        torch.nn.init.xavier_uniform_(self.ind)
-        self.mab1.reset_parameters()
-        self.mab2.reset_parameters()
-
-    def forward(self, x: Tensor, mask: Optional[Tensor] = None) -> Tensor:
-        h = self.mab1(self.ind.expand(x.size(0), -1, -1), x, y_mask=mask)
-        return self.mab2(x, h, x_mask=mask)
-
-    def __repr__(self) -> str:
-        return (f'{self.__class__.__name__}({self.ind.size(2)}, '
-                f'num_induced_points={self.ind.size(1)}, '
-                f'heads={self.mab1.heads}, '
-                f'layer_norm={self.mab1.layer_norm1 is not None}, '
-                f'dropout={self.mab1.dropout})')
-
-
-class PoolingByMultiheadAttention(torch.nn.Module):
-    r"""The Pooling by Multihead Attention (PMA) layer from the `"Set
-    Transformer: A Framework for Attention-based Permutation-Invariant Neural
-    Networks" `_ paper
-
-    .. math::
-
-        \mathrm{PMA}(\mathbf{X}) = \mathrm{MAB}(\mathbf{S}, \mathbf{x})
-
-    where :math:`\mathbf{S}` denotes :obj:`num_seed_points` learnable vectors.
-
-    Args:
-        channels (int): Size of each input sample.
-        num_seed_points (int, optional): Number of seed points.
-            (default: :obj:`1`)
-        heads (int, optional): Number of multi-head-attentions.
-            (default: :obj:`1`)
-        layer_norm (bool, optional): If set to :obj:`False`, will not apply
-            layer normalization. (default: :obj:`True`)
-        dropout (float, optional): Dropout probability of attention weights.
- (default: :obj:`0`) - """ - def __init__(self, channels: int, num_seed_points: int = 1, heads: int = 1, - layer_norm: bool = True, dropout: float = 0.0): - super().__init__() - self.lin = Linear(channels, channels) - self.seed = Parameter(torch.Tensor(1, num_seed_points, channels)) - self.mab = MultiheadAttentionBlock(channels, heads, layer_norm, - dropout) - self.reset_parameters() - - def reset_parameters(self): - self.lin.reset_parameters() - torch.nn.init.xavier_uniform_(self.seed) - self.mab.reset_parameters() - - def forward(self, x: Tensor, mask: Optional[Tensor] = None) -> Tensor: - x = self.lin(x).relu() - return self.mab(self.seed.expand(x.size(0), -1, -1), x, y_mask=mask) - - def __repr__(self) -> str: - return (f'{self.__class__.__name__}({self.seed.size(2)}, ' - f'num_seed_points={self.seed.size(1)}, ' - f'heads={self.mab.heads}, ' - f'layer_norm={self.mab.layer_norm1 is not None}, ' - f'dropout={self.mab.dropout})') diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/conv/__init__.py b/pytorch_geometric-2.3.1/torch_geometric/nn/conv/__init__.py deleted file mode 100644 index a0998c3..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/conv/__init__.py +++ /dev/null @@ -1,135 +0,0 @@ -from .message_passing import MessagePassing -from .simple_conv import SimpleConv -from .gcn_conv import GCNConv -from .cheb_conv import ChebConv -from .sage_conv import SAGEConv -from .cugraph.sage_conv import CuGraphSAGEConv -from .graph_conv import GraphConv -from .gravnet_conv import GravNetConv -from .gated_graph_conv import GatedGraphConv -from .res_gated_graph_conv import ResGatedGraphConv -from .gat_conv import GATConv -from .cugraph.gat_conv import CuGraphGATConv -from .fused_gat_conv import FusedGATConv -from .gatv2_conv import GATv2Conv -from .transformer_conv import TransformerConv -from .agnn_conv import AGNNConv -from .tag_conv import TAGConv -from .gin_conv import GINConv, GINEConv -from .arma_conv import ARMAConv -from .sg_conv import SGConv -from .appnp import APPNP -from .mf_conv import MFConv -from .rgcn_conv import RGCNConv, FastRGCNConv -from .cugraph.rgcn_conv import CuGraphRGCNConv -from .rgat_conv import RGATConv -from .signed_conv import SignedConv -from .dna_conv import DNAConv -from .point_conv import PointNetConv -from .gmm_conv import GMMConv -from .spline_conv import SplineConv -from .nn_conv import NNConv -from .cg_conv import CGConv -from .edge_conv import EdgeConv, DynamicEdgeConv -from .x_conv import XConv -from .ppf_conv import PPFConv -from .feast_conv import FeaStConv -from .point_transformer_conv import PointTransformerConv -from .hypergraph_conv import HypergraphConv -from .le_conv import LEConv -from .pna_conv import PNAConv -from .cluster_gcn_conv import ClusterGCNConv -from .gen_conv import GENConv -from .gcn2_conv import GCN2Conv -from .pan_conv import PANConv -from .wl_conv import WLConv -from .wl_conv_continuous import WLConvContinuous -from .film_conv import FiLMConv -from .supergat_conv import SuperGATConv -from .fa_conv import FAConv -from .eg_conv import EGConv -from .pdn_conv import PDNConv -from .general_conv import GeneralConv -from .hgt_conv import HGTConv -from .fast_hgt_conv import FastHGTConv -from .heat_conv import HEATConv -from .hetero_conv import HeteroConv -from .han_conv import HANConv -from .lg_conv import LGConv -from .ssg_conv import SSGConv -from .point_gnn_conv import PointGNNConv -from .gps_conv import GPSConv -from .antisymmetric_conv import AntiSymmetricConv - -__all__ = [ - 'MessagePassing', - 'SimpleConv', - 
'GCNConv', - 'ChebConv', - 'SAGEConv', - 'CuGraphSAGEConv', - 'GraphConv', - 'GravNetConv', - 'GatedGraphConv', - 'ResGatedGraphConv', - 'GATConv', - 'CuGraphGATConv', - 'FusedGATConv', - 'GATv2Conv', - 'TransformerConv', - 'AGNNConv', - 'TAGConv', - 'GINConv', - 'GINEConv', - 'ARMAConv', - 'SGConv', - 'SSGConv', - 'APPNP', - 'MFConv', - 'RGCNConv', - 'FastRGCNConv', - 'CuGraphRGCNConv', - 'RGATConv', - 'SignedConv', - 'DNAConv', - 'PointNetConv', - 'GMMConv', - 'SplineConv', - 'NNConv', - 'CGConv', - 'EdgeConv', - 'DynamicEdgeConv', - 'XConv', - 'PPFConv', - 'FeaStConv', - 'PointTransformerConv', - 'HypergraphConv', - 'LEConv', - 'PNAConv', - 'ClusterGCNConv', - 'GENConv', - 'GCN2Conv', - 'PANConv', - 'WLConv', - 'WLConvContinuous', - 'FiLMConv', - 'SuperGATConv', - 'FAConv', - 'EGConv', - 'PDNConv', - 'GeneralConv', - 'HGTConv', - 'FastHGTConv', - 'HEATConv', - 'HeteroConv', - 'HANConv', - 'LGConv', - 'PointGNNConv', - 'GPSConv', - 'AntiSymmetricConv', -] - -classes = __all__ - -ECConv = NNConv -PointConv = PointNetConv diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/conv/cugraph/gat_conv.py b/pytorch_geometric-2.3.1/torch_geometric/nn/conv/cugraph/gat_conv.py deleted file mode 100644 index c9e6bec..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/conv/cugraph/gat_conv.py +++ /dev/null @@ -1,89 +0,0 @@ -from typing import Optional, Tuple - -import torch -from torch import Tensor -from torch.nn import Linear, Parameter - -from torch_geometric.nn.conv.cugraph import CuGraphModule -from torch_geometric.nn.conv.cugraph.base import LEGACY_MODE -from torch_geometric.nn.inits import zeros - -try: - if LEGACY_MODE: - from pylibcugraphops.torch.autograd import mha_gat_n2n as GATConvAgg - else: - from pylibcugraphops.pytorch.operators import mha_gat_n2n as GATConvAgg -except ImportError: - pass - - -class CuGraphGATConv(CuGraphModule): # pragma: no cover - r"""The graph attentional operator from the `"Graph Attention Networks" - `_ paper. - - :class:`CuGraphGATConv` is an optimized version of - :class:`~torch_geometric.nn.conv.GATConv` based on the :obj:`cugraph-ops` - package that fuses message passing computation for accelerated execution - and lower memory footprint. 
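A rough usage sketch, assuming a CUDA device with the optional `pylibcugraphops` package installed (not part of this ROCm build) and assuming the `to_csc` helper of the `CuGraphModule` base class, referenced in the docstrings below, returns the `(row, colptr, num_src_nodes)` tuple that `forward` expects:

```
import torch
from torch_geometric.nn import CuGraphGATConv

device = torch.device('cuda')
x = torch.randn(5, 16, device=device)
edge_index = torch.tensor([[0, 1, 2, 3], [1, 2, 3, 4]], device=device)

conv = CuGraphGATConv(16, 8, heads=4).to(device)
csc = conv.to_csc(edge_index, size=(5, 5))  # assumed base-class helper
out = conv(x, csc)                          # shape [5, 32]
```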
- """ - def __init__( - self, - in_channels: int, - out_channels: int, - heads: int = 1, - concat: bool = True, - negative_slope: float = 0.2, - bias: bool = True, - ): - super().__init__() - - self.in_channels = in_channels - self.out_channels = out_channels - self.heads = heads - self.concat = concat - self.negative_slope = negative_slope - - self.lin = Linear(in_channels, heads * out_channels, bias=False) - self.att = Parameter(torch.Tensor(2 * heads * out_channels)) - - if bias and concat: - self.bias = Parameter(torch.Tensor(heads * out_channels)) - elif bias and not concat: - self.bias = Parameter(torch.Tensor(out_channels)) - else: - self.register_parameter('bias', None) - - self.reset_parameters() - - def reset_parameters(self): - self.lin.reset_parameters() - gain = torch.nn.init.calculate_gain('relu') - torch.nn.init.xavier_normal_( - self.att.view(2, self.heads, self.out_channels), gain=gain) - zeros(self.bias) - - def forward( - self, - x: Tensor, - csc: Tuple[Tensor, Tensor, int], - max_num_neighbors: Optional[int] = None, - ) -> Tensor: - graph = self.get_cugraph(csc, max_num_neighbors) - - x = self.lin(x) - - if LEGACY_MODE: - out = GATConvAgg(x, self.att, graph, self.heads, 'LeakyReLU', - self.negative_slope, False, self.concat) - else: - out = GATConvAgg(x, self.att, graph, self.heads, 'LeakyReLU', - self.negative_slope, self.concat) - - if self.bias is not None: - out = out + self.bias - - return out - - def __repr__(self) -> str: - return (f'{self.__class__.__name__}({self.in_channels}, ' - f'{self.out_channels}, heads={self.heads})') diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/conv/cugraph/rgcn_conv.py b/pytorch_geometric-2.3.1/torch_geometric/nn/conv/cugraph/rgcn_conv.py deleted file mode 100644 index 71fd632..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/conv/cugraph/rgcn_conv.py +++ /dev/null @@ -1,113 +0,0 @@ -from typing import Optional, Tuple - -import torch -from torch import Tensor -from torch.nn import Parameter - -from torch_geometric.nn.conv.cugraph import CuGraphModule -from torch_geometric.nn.conv.cugraph.base import LEGACY_MODE -from torch_geometric.nn.inits import glorot, zeros - -try: - if LEGACY_MODE: - from pylibcugraphops.torch.autograd import \ - agg_hg_basis_n2n_post as RGCNConvAgg - else: - from pylibcugraphops.pytorch.operators import \ - agg_hg_basis_n2n_post as RGCNConvAgg -except ImportError: - pass - - -class CuGraphRGCNConv(CuGraphModule): # pragma: no cover - r"""The relational graph convolutional operator from the `"Modeling - Relational Data with Graph Convolutional Networks" - `_ paper. - - :class:`CuGraphRGCNConv` is an optimized version of - :class:`~torch_geometric.nn.conv.RGCNConv` based on the :obj:`cugraph-ops` - package that fuses message passing computation for accelerated execution - and lower memory footprint. 
- """ - def __init__(self, in_channels: int, out_channels: int, num_relations: int, - num_bases: Optional[int] = None, aggr: str = 'mean', - root_weight: bool = True, bias: bool = True): - super().__init__() - - if aggr not in ['sum', 'add', 'mean']: - raise ValueError(f"Aggregation function must be either 'mean' " - f"or 'sum' (got '{aggr}')") - - self.in_channels = in_channels - self.out_channels = out_channels - self.num_relations = num_relations - self.num_bases = num_bases - self.aggr = aggr - self.root_weight = root_weight - - dim_root_weight = 1 if root_weight else 0 - - if num_bases is not None: - self.weight = Parameter( - torch.Tensor(num_bases + dim_root_weight, in_channels, - out_channels)) - self.comp = Parameter(torch.Tensor(num_relations, num_bases)) - else: - self.weight = Parameter( - torch.Tensor(num_relations + dim_root_weight, in_channels, - out_channels)) - self.register_parameter('comp', None) - - if bias: - self.bias = Parameter(torch.Tensor(out_channels)) - else: - self.register_parameter('bias', None) - - self.reset_parameters() - - def reset_parameters(self): - end = -1 if self.root_weight else None - glorot(self.weight[:end]) - glorot(self.comp) - if self.root_weight: - glorot(self.weight[-1]) - zeros(self.bias) - - def forward( - self, - x: Tensor, - csc: Tuple[Tensor, Tensor, int], - edge_type: Tensor, - max_num_neighbors: Optional[int] = None, - ) -> Tensor: - r"""Runs the forward pass of the module. - - Args: - x (torch.Tensor): The node features. - csc ((torch.Tensor, torch.Tensor)): A tuple containing the CSC - representation of a graph, given as a tuple of - :obj:`(row, colptr)`. Use the :meth:`to_csc` method to convert - an :obj:`edge_index` representation to the desired format. - edge_type (torch.Tensor): The edge type. - max_num_neighbors (int, optional): The maximum number of neighbors - of a target node. It is only effective when operating in a - bipartite graph.. When not given, the value will be computed - on-the-fly, leading to slightly worse performance. 
- (default: :obj:`None`) - """ - graph = self.get_typed_cugraph(csc, edge_type, self.num_relations, - max_num_neighbors) - - out = RGCNConvAgg(x, self.comp, graph, concat_own=self.root_weight, - norm_by_out_degree=bool(self.aggr == 'mean')) - - out = out @ self.weight.view(-1, self.out_channels) - - if self.bias is not None: - out = out + self.bias - - return out - - def __repr__(self) -> str: - return (f'{self.__class__.__name__}({self.in_channels}, ' - f'{self.out_channels}, num_relations={self.num_relations})') diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/conv/fast_hgt_conv.py b/pytorch_geometric-2.3.1/torch_geometric/nn/conv/fast_hgt_conv.py deleted file mode 100644 index 4a4304a..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/conv/fast_hgt_conv.py +++ /dev/null @@ -1,256 +0,0 @@ -import math -from collections import defaultdict -from typing import Dict, List, Optional, Tuple, Union - -import torch -import torch.nn.functional as F -from torch import Tensor -from torch.nn import Parameter - -from torch_geometric.nn.conv import MessagePassing -from torch_geometric.nn.dense import HeteroDictLinear, HeteroLinear -from torch_geometric.nn.inits import ones -from torch_geometric.nn.parameter_dict import ParameterDict -from torch_geometric.typing import ( - Adj, - EdgeType, - Metadata, - NodeType, - SparseTensor, -) -from torch_geometric.utils import softmax - - -class FastHGTConv(MessagePassing): - r"""See :class:`HGTConv`.""" - def __init__( - self, - in_channels: Union[int, Dict[str, int]], - out_channels: int, - metadata: Metadata, - heads: int = 1, - **kwargs, - ): - super().__init__(aggr='add', node_dim=0, **kwargs) - - if out_channels % heads != 0: - raise ValueError(f"'out_channels' (got {out_channels}) must be " - f"divisible by the number of heads (got {heads})") - - if not isinstance(in_channels, dict): - in_channels = {node_type: in_channels for node_type in metadata[0]} - - self.in_channels = in_channels - self.out_channels = out_channels - self.heads = heads - - self.node_types = metadata[0] - self.edge_types = metadata[1] - self.src_types = [edge_type[0] for edge_type in self.edge_types] - - self.k_lin = HeteroDictLinear(self.in_channels, self.out_channels) - self.q_lin = HeteroDictLinear(self.in_channels, self.out_channels) - self.v_lin = HeteroDictLinear(self.in_channels, self.out_channels) - - self.out_lin = HeteroDictLinear(self.out_channels, self.out_channels, - types=self.node_types) - - dim = out_channels // heads - num_types = heads * len(self.edge_types) - - self.k_rel = HeteroLinear(dim, dim, num_types, is_sorted=True, - bias=False) - self.v_rel = HeteroLinear(dim, dim, num_types, is_sorted=True, - bias=False) - - self.skip = ParameterDict({ - node_type: Parameter(torch.Tensor(1)) - for node_type in self.node_types - }) - - self.p_rel = ParameterDict() - for edge_type in self.edge_types: - edge_type = '__'.join(edge_type) - self.p_rel[edge_type] = Parameter(torch.Tensor(1, heads)) - - self.reset_parameters() - - def reset_parameters(self): - super().reset_parameters() - self.k_lin.reset_parameters() - self.q_lin.reset_parameters() - self.v_lin.reset_parameters() - self.out_lin.reset_parameters() - self.k_rel.reset_parameters() - self.v_rel.reset_parameters() - ones(self.skip) - ones(self.p_rel) - - def _cat(self, x_dict: Dict[str, Tensor]) -> Tuple[Tensor, Dict[str, int]]: - """Concatenates a dictionary of features.""" - cumsum = 0 - outs: List[Tensor] = [] - offset: Dict[str, int] = {} - for key, x in x_dict.items(): - outs.append(x) - offset[key] 
= cumsum
-            cumsum += x.size(0)
-        return torch.cat(outs, dim=0), offset
-
-    def _construct_src_node_feat(
-        self,
-        k_dict: Dict[str, Tensor],
-        v_dict: Dict[str, Tensor],
-    ) -> Tuple[Tensor, Tensor, Dict[EdgeType, int]]:
-        """Constructs the source node representations."""
-        count = 0
-        cumsum = 0
-        H, D = self.heads, self.out_channels // self.heads
-
-        # Flatten into a single tensor with shape [num_edge_types * heads, D]:
-        ks: List[Tensor] = []
-        vs: List[Tensor] = []
-        type_list: List[Tensor] = []
-        offset: Dict[EdgeType, int] = {}
-        for edge_type in self.edge_types:
-            src, _, _ = edge_type
-
-            ks.append(k_dict[src].view(-1, D))
-            vs.append(v_dict[src].view(-1, D))
-
-            N = k_dict[src].size(0)
-            for _ in range(H):
-                type_list.append(torch.full((N, ), count, dtype=torch.long))
-                count += 1
-            offset[edge_type] = cumsum
-            cumsum += N
-
-        type_vec = torch.cat(type_list, dim=0)
-        k = self.k_rel(torch.cat(ks, dim=0), type_vec).view(-1, H, D)
-        v = self.v_rel(torch.cat(vs, dim=0), type_vec).view(-1, H, D)
-
-        return k, v, offset
-
-    def _construct_edge_index(
-        self,
-        edge_index_dict: Dict[EdgeType, Adj],
-        src_offset: Dict[EdgeType, int],
-        dst_offset: Dict[NodeType, int],
-    ) -> Tuple[Adj, Tensor]:
-        """Constructs a tensor of edge indices by concatenating edge indices
-        for each edge type. The edge indices are increased by the offset of
-        the source and destination nodes."""
-        edge_indices: List[Tensor] = []
-        ps: List[Tensor] = []
-
-        for edge_type in self.edge_types:
-            _, _, dst_type = edge_type
-
-            edge_index = edge_index_dict[edge_type]
-
-            # (TODO) Add support for SparseTensor w/o converting.
-            is_sparse = isinstance(edge_index, SparseTensor)
-            if is_sparse:  # Convert to COO:
-                dst, src, _ = edge_index.coo()
-                edge_index = torch.stack([src, dst], dim=0)
-            else:
-                edge_index = edge_index.clone()
-
-            p = self.p_rel['__'.join(edge_type)].expand(edge_index.size(1), -1)
-            ps.append(p)
-
-            # Add offset to edge indices:
-            edge_index[0] += src_offset[edge_type]
-            edge_index[1] += dst_offset[dst_type]
-            edge_indices.append(edge_index)
-
-        # Concatenate all edges and edge tensors:
-        p = torch.cat(ps, dim=0)
-        edge_index = torch.cat(edge_indices, dim=1)
-
-        if is_sparse:
-            edge_index = SparseTensor(row=edge_index[1], col=edge_index[0],
-                                      value=p)
-
-        return edge_index, p
-
-    def forward(
-        self,
-        x_dict: Dict[NodeType, Tensor],
-        edge_index_dict: Dict[EdgeType, Adj]  # Support both.
-    ) -> Dict[NodeType, Optional[Tensor]]:
-        r"""Runs the forward pass of the module.
-
-        Args:
-            x_dict (Dict[str, torch.Tensor]): A dictionary holding input node
-                features for each individual node type.
-            edge_index_dict (Dict[Tuple[str, str, str], torch.Tensor]): A
-                dictionary holding graph connectivity information for each
-                individual edge type, either as a :class:`torch.Tensor` of
-                shape :obj:`[2, num_edges]` or a
-                :class:`torch_sparse.SparseTensor`.
-
-        :rtype: :obj:`Dict[str, Optional[torch.Tensor]]` - The output node
-            embeddings for each node type.
-            In case a node type does not receive any message, its output will
-            be set to :obj:`None`.
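A small interface sketch with toy tensors (`FastHGTConv` mirrors the public `HGTConv` interface, so the same call pattern applies; node types that receive no message come back as `None`):

```
import torch
from torch_geometric.nn import HGTConv  # FastHGTConv shares this interface

x_dict = {
    'paper': torch.randn(6, 16),
    'author': torch.randn(4, 16),
}
edge_index_dict = {
    ('author', 'writes', 'paper'):
        torch.tensor([[0, 1, 2, 3], [0, 2, 4, 5]]),
}
metadata = (['paper', 'author'], [('author', 'writes', 'paper')])

conv = HGTConv(16, 32, metadata, heads=2)
out_dict = conv(x_dict, edge_index_dict)
print(out_dict['paper'].shape)  # torch.Size([6, 32])
```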
- """ - H, D = self.heads, self.out_channels // self.heads - - k_dict, q_dict, v_dict = {}, {}, {} - out_dict = defaultdict(list) - - # Compute K, Q, V over node types: - k_dict = self.k_lin(x_dict) - k_dict = {k: v.view(-1, H, D) for k, v in k_dict.items()} - - q_dict = self.q_lin(x_dict) - q_dict = {k: v.view(-1, H, D) for k, v in q_dict.items()} - - v_dict = self.v_lin(x_dict) - v_dict = {k: v.view(-1, H, D) for k, v in v_dict.items()} - - q, dst_offset = self._cat(q_dict) - k, v, src_offset = self._construct_src_node_feat(k_dict, v_dict) - - edge_index, edge_attr = self._construct_edge_index( - edge_index_dict, src_offset, dst_offset) - - out = self.propagate(edge_index, k=k, q=q, v=v, edge_attr=edge_attr, - size=None) - - # Reconstruct output node embeddings dict: - for node_type, start_offset in dst_offset.items(): - end_offset = start_offset + q_dict[node_type].size(0) - out_dict[node_type] = out[start_offset:end_offset] - - # Transform output node embeddings: - a_dict = self.out_lin({k: F.gelu(v) for k, v in out_dict.items()}) - - # Iterate over node types: - for node_type, out in out_dict.items(): - if out is None: - out_dict[node_type] = None - continue - else: - out = a_dict[node_type] - - if out.size(-1) == x_dict[node_type].size(-1): - alpha = self.skip[node_type].sigmoid() - out = alpha * out + (1 - alpha) * x_dict[node_type] - out_dict[node_type] = out - - return out_dict - - def message(self, k_j: Tensor, q_i: Tensor, v_j: Tensor, edge_attr: Tensor, - index: Tensor, ptr: Optional[Tensor], - size_i: Optional[int]) -> Tensor: - alpha = (q_i * k_j).sum(dim=-1) * edge_attr - alpha = alpha / math.sqrt(q_i.size(-1)) - alpha = softmax(alpha, index, ptr, size_i) - out = v_j * alpha.view(-1, self.heads, 1) - return out.view(-1, self.out_channels) - - def __repr__(self) -> str: - return (f'{self.__class__.__name__}(-1, {self.out_channels}, ' - f'heads={self.heads})') diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/conv/gat_conv.py b/pytorch_geometric-2.3.1/torch_geometric/nn/conv/gat_conv.py deleted file mode 100644 index e11d62e..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/conv/gat_conv.py +++ /dev/null @@ -1,304 +0,0 @@ -from typing import Optional, Tuple, Union - -import torch -import torch.nn.functional as F -from torch import Tensor -from torch.nn import Parameter - -from torch_geometric.nn.conv import MessagePassing -from torch_geometric.nn.dense.linear import Linear -from torch_geometric.nn.inits import glorot, zeros -from torch_geometric.typing import NoneType # noqa -from torch_geometric.typing import ( - Adj, - OptPairTensor, - OptTensor, - Size, - SparseTensor, - torch_sparse, -) -from torch_geometric.utils import ( - add_self_loops, - is_torch_sparse_tensor, - remove_self_loops, - softmax, -) -from torch_geometric.utils.sparse import set_sparse_value - - -class GATConv(MessagePassing): - r"""The graph attentional operator from the `"Graph Attention Networks" - `_ paper - - .. math:: - \mathbf{x}^{\prime}_i = \alpha_{i,i}\mathbf{\Theta}\mathbf{x}_{i} + - \sum_{j \in \mathcal{N}(i)} \alpha_{i,j}\mathbf{\Theta}\mathbf{x}_{j}, - - where the attention coefficients :math:`\alpha_{i,j}` are computed as - - .. 
math:: - \alpha_{i,j} = - \frac{ - \exp\left(\mathrm{LeakyReLU}\left(\mathbf{a}^{\top} - [\mathbf{\Theta}\mathbf{x}_i \, \Vert \, \mathbf{\Theta}\mathbf{x}_j] - \right)\right)} - {\sum_{k \in \mathcal{N}(i) \cup \{ i \}} - \exp\left(\mathrm{LeakyReLU}\left(\mathbf{a}^{\top} - [\mathbf{\Theta}\mathbf{x}_i \, \Vert \, \mathbf{\Theta}\mathbf{x}_k] - \right)\right)}. - - If the graph has multi-dimensional edge features :math:`\mathbf{e}_{i,j}`, - the attention coefficients :math:`\alpha_{i,j}` are computed as - - .. math:: - \alpha_{i,j} = - \frac{ - \exp\left(\mathrm{LeakyReLU}\left(\mathbf{a}^{\top} - [\mathbf{\Theta}\mathbf{x}_i \, \Vert \, \mathbf{\Theta}\mathbf{x}_j - \, \Vert \, \mathbf{\Theta}_{e} \mathbf{e}_{i,j}]\right)\right)} - {\sum_{k \in \mathcal{N}(i) \cup \{ i \}} - \exp\left(\mathrm{LeakyReLU}\left(\mathbf{a}^{\top} - [\mathbf{\Theta}\mathbf{x}_i \, \Vert \, \mathbf{\Theta}\mathbf{x}_k - \, \Vert \, \mathbf{\Theta}_{e} \mathbf{e}_{i,k}]\right)\right)}. - - Args: - in_channels (int or tuple): Size of each input sample, or :obj:`-1` to - derive the size from the first input(s) to the forward method. - A tuple corresponds to the sizes of source and target - dimensionalities. - out_channels (int): Size of each output sample. - heads (int, optional): Number of multi-head-attentions. - (default: :obj:`1`) - concat (bool, optional): If set to :obj:`False`, the multi-head - attentions are averaged instead of concatenated. - (default: :obj:`True`) - negative_slope (float, optional): LeakyReLU angle of the negative - slope. (default: :obj:`0.2`) - dropout (float, optional): Dropout probability of the normalized - attention coefficients which exposes each node to a stochastically - sampled neighborhood during training. (default: :obj:`0`) - add_self_loops (bool, optional): If set to :obj:`False`, will not add - self-loops to the input graph. (default: :obj:`True`) - edge_dim (int, optional): Edge feature dimensionality (in case - there are any). (default: :obj:`None`) - fill_value (float or torch.Tensor or str, optional): The way to - generate edge features of self-loops (in case - :obj:`edge_dim != None`). - If given as :obj:`float` or :class:`torch.Tensor`, edge features of - self-loops will be directly given by :obj:`fill_value`. - If given as :obj:`str`, edge features of self-loops are computed by - aggregating all features of edges that point to the specific node, - according to a reduce operation. (:obj:`"add"`, :obj:`"mean"`, - :obj:`"min"`, :obj:`"max"`, :obj:`"mul"`). (default: :obj:`"mean"`) - bias (bool, optional): If set to :obj:`False`, the layer will not learn - an additive bias. (default: :obj:`True`) - **kwargs (optional): Additional arguments of - :class:`torch_geometric.nn.conv.MessagePassing`. - - Shapes: - - **input:** - node features :math:`(|\mathcal{V}|, F_{in})` or - :math:`((|\mathcal{V_s}|, F_{s}), (|\mathcal{V_t}|, F_{t}))` - if bipartite, - edge indices :math:`(2, |\mathcal{E}|)`, - edge features :math:`(|\mathcal{E}|, D)` *(optional)* - - **output:** node features :math:`(|\mathcal{V}|, H * F_{out})` or - :math:`((|\mathcal{V}_t|, H * F_{out})` if bipartite. 
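The first formula above reduces to a LeakyReLU-scored softmax over each target node's neighborhood. A toy single-head computation in plain PyTorch, with random untrained weights standing in for the learned :math:`\mathbf{\Theta}` and :math:`\mathbf{a}`:

```
import torch
import torch.nn.functional as F

torch.manual_seed(0)
F_in, F_out = 4, 2
Theta = torch.randn(F_out, F_in)   # shared weight matrix
a = torch.randn(2 * F_out)         # attention vector a

x = torch.randn(3, F_in)           # three nodes
h = x @ Theta.t()                  # transformed features Theta @ x_i

# Unnormalized scores e_0j = LeakyReLU(a^T [h_0 || h_j]) for node 0
# attending over N(0) ∪ {0} = {0, 1, 2}:
neighbors = [0, 1, 2]
e = torch.stack([
    F.leaky_relu(torch.dot(a, torch.cat([h[0], h[j]])), negative_slope=0.2)
    for j in neighbors
])

alpha = torch.softmax(e, dim=0)    # normalized coefficients alpha_0j
out_0 = (alpha.unsqueeze(-1) * h[neighbors]).sum(dim=0)
print(alpha, out_0)
```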
- If :obj:`return_attention_weights=True`, then - :math:`((|\mathcal{V}|, H * F_{out}), - ((2, |\mathcal{E}|), (|\mathcal{E}|, H)))` - or :math:`((|\mathcal{V_t}|, H * F_{out}), ((2, |\mathcal{E}|), - (|\mathcal{E}|, H)))` if bipartite - """ - def __init__( - self, - in_channels: Union[int, Tuple[int, int]], - out_channels: int, - heads: int = 1, - concat: bool = True, - negative_slope: float = 0.2, - dropout: float = 0.0, - add_self_loops: bool = True, - edge_dim: Optional[int] = None, - fill_value: Union[float, Tensor, str] = 'mean', - bias: bool = True, - **kwargs, - ): - kwargs.setdefault('aggr', 'add') - super().__init__(node_dim=0, **kwargs) - - self.in_channels = in_channels - self.out_channels = out_channels - self.heads = heads - self.concat = concat - self.negative_slope = negative_slope - self.dropout = dropout - self.add_self_loops = add_self_loops - self.edge_dim = edge_dim - self.fill_value = fill_value - - # In case we are operating in bipartite graphs, we apply separate - # transformations 'lin_src' and 'lin_dst' to source and target nodes: - if isinstance(in_channels, int): - self.lin_src = Linear(in_channels, heads * out_channels, - bias=False, weight_initializer='glorot') - self.lin_dst = self.lin_src - else: - self.lin_src = Linear(in_channels[0], heads * out_channels, False, - weight_initializer='glorot') - self.lin_dst = Linear(in_channels[1], heads * out_channels, False, - weight_initializer='glorot') - - # The learnable parameters to compute attention coefficients: - self.att_src = Parameter(torch.Tensor(1, heads, out_channels)) - self.att_dst = Parameter(torch.Tensor(1, heads, out_channels)) - - if edge_dim is not None: - self.lin_edge = Linear(edge_dim, heads * out_channels, bias=False, - weight_initializer='glorot') - self.att_edge = Parameter(torch.Tensor(1, heads, out_channels)) - else: - self.lin_edge = None - self.register_parameter('att_edge', None) - - if bias and concat: - self.bias = Parameter(torch.Tensor(heads * out_channels)) - elif bias and not concat: - self.bias = Parameter(torch.Tensor(out_channels)) - else: - self.register_parameter('bias', None) - - self.reset_parameters() - - def reset_parameters(self): - super().reset_parameters() - self.lin_src.reset_parameters() - self.lin_dst.reset_parameters() - if self.lin_edge is not None: - self.lin_edge.reset_parameters() - glorot(self.att_src) - glorot(self.att_dst) - glorot(self.att_edge) - zeros(self.bias) - - def forward(self, x: Union[Tensor, OptPairTensor], edge_index: Adj, - edge_attr: OptTensor = None, size: Size = None, - return_attention_weights=None): - # type: (Union[Tensor, OptPairTensor], Tensor, OptTensor, Size, NoneType) -> Tensor # noqa - # type: (Union[Tensor, OptPairTensor], SparseTensor, OptTensor, Size, NoneType) -> Tensor # noqa - # type: (Union[Tensor, OptPairTensor], Tensor, OptTensor, Size, bool) -> Tuple[Tensor, Tuple[Tensor, Tensor]] # noqa - # type: (Union[Tensor, OptPairTensor], SparseTensor, OptTensor, Size, bool) -> Tuple[Tensor, SparseTensor] # noqa - r"""Runs the forward pass of the module. - - Args: - return_attention_weights (bool, optional): If set to :obj:`True`, - will additionally return the tuple - :obj:`(edge_index, attention_weights)`, holding the computed - attention weights for each edge. (default: :obj:`None`) - """ - # NOTE: attention weights will be returned whenever - # `return_attention_weights` is set to a value, regardless of its - # actual value (might be `True` or `False`). 
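A usage sketch of the constructor and forward signature documented above, assuming a working `torch_geometric` installation. Note that self-loops are added by default, so the returned attention tensor covers `num_edges + num_nodes` entries:

```
import torch
from torch_geometric.nn import GATConv

conv = GATConv(in_channels=16, out_channels=8, heads=2, edge_dim=4)

x = torch.randn(5, 16)                    # 5 nodes
edge_index = torch.tensor([[0, 1, 2, 3],  # 4 edges
                           [1, 2, 3, 4]])
edge_attr = torch.randn(4, 4)

out, (ei, alpha) = conv(x, edge_index, edge_attr,
                        return_attention_weights=True)

print(out.shape)    # [5, 2 * 8], since concat=True by default
print(ei.shape)     # [2, 9]: 4 edges + 5 self-loops
print(alpha.shape)  # [9, 2]: one coefficient per edge and head
```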
This is a current somewhat - # hacky workaround to allow for TorchScript support via the - # `torch.jit._overload` decorator, as we can only change the output - # arguments conditioned on type (`None` or `bool`), not based on its - # actual value. - - H, C = self.heads, self.out_channels - - # We first transform the input node features. If a tuple is passed, we - # transform source and target node features via separate weights: - if isinstance(x, Tensor): - assert x.dim() == 2, "Static graphs not supported in 'GATConv'" - x_src = x_dst = self.lin_src(x).view(-1, H, C) - else: # Tuple of source and target node features: - x_src, x_dst = x - assert x_src.dim() == 2, "Static graphs not supported in 'GATConv'" - x_src = self.lin_src(x_src).view(-1, H, C) - if x_dst is not None: - x_dst = self.lin_dst(x_dst).view(-1, H, C) - - x = (x_src, x_dst) - - # Next, we compute node-level attention coefficients, both for source - # and target nodes (if present): - alpha_src = (x_src * self.att_src).sum(dim=-1) - alpha_dst = None if x_dst is None else (x_dst * self.att_dst).sum(-1) - alpha = (alpha_src, alpha_dst) - - if self.add_self_loops: - if isinstance(edge_index, Tensor): - # We only want to add self-loops for nodes that appear both as - # source and target nodes: - num_nodes = x_src.size(0) - if x_dst is not None: - num_nodes = min(num_nodes, x_dst.size(0)) - num_nodes = min(size) if size is not None else num_nodes - edge_index, edge_attr = remove_self_loops( - edge_index, edge_attr) - edge_index, edge_attr = add_self_loops( - edge_index, edge_attr, fill_value=self.fill_value, - num_nodes=num_nodes) - elif isinstance(edge_index, SparseTensor): - if self.edge_dim is None: - edge_index = torch_sparse.set_diag(edge_index) - else: - raise NotImplementedError( - "The usage of 'edge_attr' and 'add_self_loops' " - "simultaneously is currently not yet supported for " - "'edge_index' in a 'SparseTensor' form") - - # edge_updater_type: (alpha: OptPairTensor, edge_attr: OptTensor) - alpha = self.edge_updater(edge_index, alpha=alpha, edge_attr=edge_attr) - - # propagate_type: (x: OptPairTensor, alpha: Tensor) - out = self.propagate(edge_index, x=x, alpha=alpha, size=size) - - if self.concat: - out = out.view(-1, self.heads * self.out_channels) - else: - out = out.mean(dim=1) - - if self.bias is not None: - out = out + self.bias - - if isinstance(return_attention_weights, bool): - if isinstance(edge_index, Tensor): - if is_torch_sparse_tensor(edge_index): - # TODO TorchScript requires to return a tuple - adj = set_sparse_value(edge_index, alpha) - return out, (adj, alpha) - else: - return out, (edge_index, alpha) - elif isinstance(edge_index, SparseTensor): - return out, edge_index.set_value(alpha, layout='coo') - else: - return out - - def edge_update(self, alpha_j: Tensor, alpha_i: OptTensor, - edge_attr: OptTensor, index: Tensor, ptr: OptTensor, - size_i: Optional[int]) -> Tensor: - # Given edge-level attention coefficients for source and target nodes, - # we simply need to sum them up to "emulate" concatenation: - alpha = alpha_j if alpha_i is None else alpha_j + alpha_i - if index.numel() == 0: - return alpha - if edge_attr is not None and self.lin_edge is not None: - if edge_attr.dim() == 1: - edge_attr = edge_attr.view(-1, 1) - edge_attr = self.lin_edge(edge_attr) - edge_attr = edge_attr.view(-1, self.heads, self.out_channels) - alpha_edge = (edge_attr * self.att_edge).sum(dim=-1) - alpha = alpha + alpha_edge - - alpha = F.leaky_relu(alpha, self.negative_slope) - alpha = softmax(alpha, index, ptr, 
size_i) - alpha = F.dropout(alpha, p=self.dropout, training=self.training) - return alpha - - def message(self, x_j: Tensor, alpha: Tensor) -> Tensor: - return alpha.unsqueeze(-1) * x_j - - def __repr__(self) -> str: - return (f'{self.__class__.__name__}({self.in_channels}, ' - f'{self.out_channels}, heads={self.heads})') diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/conv/hgt_conv.py b/pytorch_geometric-2.3.1/torch_geometric/nn/conv/hgt_conv.py deleted file mode 100644 index 1f5bc86..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/conv/hgt_conv.py +++ /dev/null @@ -1,204 +0,0 @@ -import math -from typing import Dict, List, Optional, Union - -import torch -import torch.nn.functional as F -from torch import Tensor -from torch.nn import Parameter - -from torch_geometric.nn.conv import MessagePassing -from torch_geometric.nn.dense import Linear -from torch_geometric.nn.inits import glorot, ones, reset -from torch_geometric.nn.module_dict import ModuleDict -from torch_geometric.nn.parameter_dict import ParameterDict -from torch_geometric.typing import EdgeType, Metadata, NodeType, SparseTensor -from torch_geometric.utils import softmax - - -def group(xs: List[Tensor], aggr: Optional[str]) -> Optional[Tensor]: - if len(xs) == 0: - return None - elif aggr is None: - return torch.stack(xs, dim=1) - elif len(xs) == 1: - return xs[0] - elif aggr == "cat": - return torch.cat(xs, dim=-1) - else: - out = torch.stack(xs, dim=0) - out = getattr(torch, aggr)(out, dim=0) - out = out[0] if isinstance(out, tuple) else out - return out - - -class HGTConv(MessagePassing): - r"""The Heterogeneous Graph Transformer (HGT) operator from the - `"Heterogeneous Graph Transformer" `_ - paper. - - .. note:: - - For an example of using HGT, see `examples/hetero/hgt_dblp.py - `_. - - .. note:: - - For a faster alternative, use :class:`FastHGTConv` which does not - iterate over individual node and edge types. - - Args: - in_channels (int or Dict[str, int]): Size of each input sample of every - node type, or :obj:`-1` to derive the size from the first input(s) - to the forward method. - out_channels (int): Size of each output sample. - metadata (Tuple[List[str], List[Tuple[str, str, str]]]): The metadata - of the heterogeneous graph, *i.e.* its node and edge types given - by a list of strings and a list of string triplets, respectively. - See :meth:`torch_geometric.data.HeteroData.metadata` for more - information. - heads (int, optional): Number of multi-head-attentions. - (default: :obj:`1`) - group (str, optional): The aggregation scheme to use for grouping node - embeddings generated by different relations - (:obj:`"sum"`, :obj:`"mean"`, :obj:`"min"`, :obj:`"max"`). - (default: :obj:`"sum"`) - **kwargs (optional): Additional arguments of - :class:`torch_geometric.nn.conv.MessagePassing`. 
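The `group` helper above merges the per-relation outputs that arrive at a single node type. An illustrative standalone re-implementation with a few calls, showing in particular why the tuple unpacking is needed for reductions such as `torch.min`, which return a `(values, indices)` pair:

```
from typing import List, Optional

import torch
from torch import Tensor


def group_like(xs: List[Tensor], aggr: Optional[str]) -> Optional[Tensor]:
    # Same semantics as the `group` helper above (re-stated for the demo):
    if len(xs) == 0:
        return None
    elif aggr is None:
        return torch.stack(xs, dim=1)
    elif len(xs) == 1:
        return xs[0]
    elif aggr == "cat":
        return torch.cat(xs, dim=-1)
    else:
        out = torch.stack(xs, dim=0)
        out = getattr(torch, aggr)(out, dim=0)
        return out[0] if isinstance(out, tuple) else out


xs = [torch.ones(2, 3), 2 * torch.ones(2, 3)]
print(group_like(xs, "sum").tolist())  # element-wise sum -> all 3.0
print(group_like(xs, "cat").shape)     # torch.Size([2, 6])
print(group_like(xs, "min").tolist())  # torch.min returns a tuple; unpacked
print(group_like([], "sum"))           # None: node type received no message
```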
- """ - def __init__( - self, - in_channels: Union[int, Dict[str, int]], - out_channels: int, - metadata: Metadata, - heads: int = 1, - group: str = "sum", - **kwargs, - ): - super().__init__(aggr='add', node_dim=0, **kwargs) - - if out_channels % heads != 0: - raise ValueError(f"'out_channels' (got {out_channels}) must be " - f"divisible by the number of heads (got {heads})") - - if not isinstance(in_channels, dict): - in_channels = {node_type: in_channels for node_type in metadata[0]} - - self.in_channels = in_channels - self.out_channels = out_channels - self.heads = heads - self.group = group - - self.k_lin = ModuleDict() - self.q_lin = ModuleDict() - self.v_lin = ModuleDict() - self.a_lin = ModuleDict() - self.skip = ParameterDict() - for node_type, in_channels in self.in_channels.items(): - self.k_lin[node_type] = Linear(in_channels, out_channels) - self.q_lin[node_type] = Linear(in_channels, out_channels) - self.v_lin[node_type] = Linear(in_channels, out_channels) - self.a_lin[node_type] = Linear(out_channels, out_channels) - self.skip[node_type] = Parameter(torch.Tensor(1)) - - self.a_rel = ParameterDict() - self.m_rel = ParameterDict() - self.p_rel = ParameterDict() - dim = out_channels // heads - for edge_type in metadata[1]: - edge_type = '__'.join(edge_type) - self.a_rel[edge_type] = Parameter(torch.Tensor(heads, dim, dim)) - self.m_rel[edge_type] = Parameter(torch.Tensor(heads, dim, dim)) - self.p_rel[edge_type] = Parameter(torch.Tensor(heads)) - - self.reset_parameters() - - def reset_parameters(self): - super().reset_parameters() - reset(self.k_lin) - reset(self.q_lin) - reset(self.v_lin) - reset(self.a_lin) - ones(self.skip) - ones(self.p_rel) - glorot(self.a_rel) - glorot(self.m_rel) - - def forward( - self, - x_dict: Dict[NodeType, Tensor], - edge_index_dict: Union[Dict[EdgeType, Tensor], - Dict[EdgeType, SparseTensor]] # Support both. - ) -> Dict[NodeType, Optional[Tensor]]: - r"""Runs the forward pass of the module. - - Args: - x_dict (Dict[str, torch.Tensor]): A dictionary holding input node - features for each individual node type. - edge_index_dict (Dict[Tuple[str, str, str], torch.Tensor]): A - dictionary holding graph connectivity information for each - individual edge type, either as a :class:`torch.Tensor` of - shape :obj:`[2, num_edges]` or a - :class:`torch_sparse.SparseTensor`. - - :rtype: :obj:`Dict[str, Optional[torch.Tensor]]` - The output node - embeddings for each node type. - In case a node type does not receive any message, its output will - be set to :obj:`None`. 
- """ - H, D = self.heads, self.out_channels // self.heads - - k_dict, q_dict, v_dict, out_dict = {}, {}, {}, {} - - # Iterate over node-types: - for node_type, x in x_dict.items(): - k_dict[node_type] = self.k_lin[node_type](x).view(-1, H, D) - q_dict[node_type] = self.q_lin[node_type](x).view(-1, H, D) - v_dict[node_type] = self.v_lin[node_type](x).view(-1, H, D) - out_dict[node_type] = [] - - # Iterate over edge-types: - for edge_type, edge_index in edge_index_dict.items(): - src_type, _, dst_type = edge_type - edge_type = '__'.join(edge_type) - - a_rel = self.a_rel[edge_type] - k = (k_dict[src_type].transpose(0, 1) @ a_rel).transpose(1, 0) - - m_rel = self.m_rel[edge_type] - v = (v_dict[src_type].transpose(0, 1) @ m_rel).transpose(1, 0) - - # propagate_type: (k: Tensor, q: Tensor, v: Tensor, rel: Tensor) - out = self.propagate(edge_index, k=k, q=q_dict[dst_type], v=v, - rel=self.p_rel[edge_type], size=None) - out_dict[dst_type].append(out) - - # Iterate over node-types: - for node_type, outs in out_dict.items(): - out = group(outs, self.group) - - if out is None: - out_dict[node_type] = None - continue - - out = self.a_lin[node_type](F.gelu(out)) - if out.size(-1) == x_dict[node_type].size(-1): - alpha = self.skip[node_type].sigmoid() - out = alpha * out + (1 - alpha) * x_dict[node_type] - out_dict[node_type] = out - - return out_dict - - def message(self, k_j: Tensor, q_i: Tensor, v_j: Tensor, rel: Tensor, - index: Tensor, ptr: Optional[Tensor], - size_i: Optional[int]) -> Tensor: - - alpha = (q_i * k_j).sum(dim=-1) * rel - alpha = alpha / math.sqrt(q_i.size(-1)) - alpha = softmax(alpha, index, ptr, size_i) - out = v_j * alpha.view(-1, self.heads, 1) - return out.view(-1, self.out_channels) - - def __repr__(self) -> str: - return (f'{self.__class__.__name__}(-1, {self.out_channels}, ' - f'heads={self.heads})') diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/conv/res_gated_graph_conv.py b/pytorch_geometric-2.3.1/torch_geometric/nn/conv/res_gated_graph_conv.py deleted file mode 100644 index f9bd241..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/conv/res_gated_graph_conv.py +++ /dev/null @@ -1,120 +0,0 @@ -from typing import Callable, Optional, Tuple, Union - -from torch import Tensor -from torch.nn import Parameter, Sigmoid - -from torch_geometric.nn.conv import MessagePassing -from torch_geometric.nn.dense.linear import Linear -from torch_geometric.nn.inits import zeros -from torch_geometric.typing import Adj, PairTensor - - -class ResGatedGraphConv(MessagePassing): - r"""The residual gated graph convolutional operator from the - `"Residual Gated Graph ConvNets" `_ paper - - .. math:: - \mathbf{x}^{\prime}_i = \mathbf{W}_1 \mathbf{x}_i + - \sum_{j \in \mathcal{N}(i)} \eta_{i,j} \odot \mathbf{W}_2 \mathbf{x}_j - - where the gate :math:`\eta_{i,j}` is defined as - - .. math:: - \eta_{i,j} = \sigma(\mathbf{W}_3 \mathbf{x}_i + \mathbf{W}_4 - \mathbf{x}_j) - - with :math:`\sigma` denoting the sigmoid function. - - Args: - in_channels (int or tuple): Size of each input sample, or :obj:`-1` to - derive the size from the first input(s) to the forward method. - A tuple corresponds to the sizes of source and target - dimensionalities. - out_channels (int): Size of each output sample. - act (callable, optional): Gating function :math:`\sigma`. - (default: :meth:`torch.nn.Sigmoid()`) - bias (bool, optional): If set to :obj:`False`, the layer will not learn - an additive bias. 
(default: :obj:`True`) - root_weight (bool, optional): If set to :obj:`False`, the layer will - not add transformed root node features to the output. - (default: :obj:`True`) - **kwargs (optional): Additional arguments of - :class:`torch_geometric.nn.conv.MessagePassing`. - - Shapes: - - **inputs:** - node features :math:`(|\mathcal{V}|, F_{in})` or - :math:`((|\mathcal{V_s}|, F_{s}), (|\mathcal{V_t}|, F_{t}))` - if bipartite, - edge indices :math:`(2, |\mathcal{E}|)` - - **outputs:** node features :math:`(|\mathcal{V}|, F_{out})` or - :math:`(|\mathcal{V_t}|, F_{out})` if bipartite - """ - def __init__( - self, - in_channels: Union[int, Tuple[int, int]], - out_channels: int, - act: Optional[Callable] = Sigmoid(), - root_weight: bool = True, - bias: bool = True, - **kwargs, - ): - - kwargs.setdefault('aggr', 'add') - super().__init__(**kwargs) - - self.in_channels = in_channels - self.out_channels = out_channels - self.act = act - self.root_weight = root_weight - - if isinstance(in_channels, int): - in_channels = (in_channels, in_channels) - - self.lin_key = Linear(in_channels[1], out_channels) - self.lin_query = Linear(in_channels[0], out_channels) - self.lin_value = Linear(in_channels[0], out_channels) - - if root_weight: - self.lin_skip = Linear(in_channels[1], out_channels, bias=False) - else: - self.register_parameter('lin_skip', None) - - if bias: - self.bias = Parameter(Tensor(out_channels)) - else: - self.register_parameter('bias', None) - - self.reset_parameters() - - def reset_parameters(self): - super().reset_parameters() - self.lin_key.reset_parameters() - self.lin_query.reset_parameters() - self.lin_value.reset_parameters() - if self.lin_skip is not None: - self.lin_skip.reset_parameters() - if self.bias is not None: - zeros(self.bias) - - def forward(self, x: Union[Tensor, PairTensor], edge_index: Adj) -> Tensor: - if isinstance(x, Tensor): - x: PairTensor = (x, x) - - k = self.lin_key(x[1]) - q = self.lin_query(x[0]) - v = self.lin_value(x[0]) - - # propagate_type: (k: Tensor, q: Tensor, v: Tensor) - out = self.propagate(edge_index, k=k, q=q, v=v, size=None) - - if self.root_weight: - out = out + self.lin_skip(x[1]) - - if self.bias is not None: - out = out + self.bias - - return out - - def message(self, k_i: Tensor, q_j: Tensor, v_j: Tensor) -> Tensor: - return self.act(k_i + q_j) * v_j diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/conv/rgcn_conv.py b/pytorch_geometric-2.3.1/torch_geometric/nn/conv/rgcn_conv.py deleted file mode 100644 index 938f687..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/conv/rgcn_conv.py +++ /dev/null @@ -1,359 +0,0 @@ -from typing import Optional, Tuple, Union - -import torch -from torch import Tensor -from torch.nn import Parameter -from torch.nn import Parameter as Param - -import torch_geometric.typing -from torch_geometric.nn.conv import MessagePassing -from torch_geometric.nn.inits import glorot, zeros -from torch_geometric.typing import ( - Adj, - OptTensor, - SparseTensor, - pyg_lib, - torch_sparse, -) -from torch_geometric.utils import index_sort, one_hot, scatter, spmm -from torch_geometric.utils.sparse import index2ptr - - -@torch.jit._overload -def masked_edge_index(edge_index, edge_mask): - # type: (Tensor, Tensor) -> Tensor - pass - - -@torch.jit._overload -def masked_edge_index(edge_index, edge_mask): - # type: (SparseTensor, Tensor) -> SparseTensor - pass - - -def masked_edge_index(edge_index, edge_mask): - if isinstance(edge_index, Tensor): - return edge_index[:, edge_mask] - return 
torch_sparse.masked_select_nnz(edge_index, edge_mask, layout='coo') - - -class RGCNConv(MessagePassing): - r"""The relational graph convolutional operator from the `"Modeling - Relational Data with Graph Convolutional Networks" - `_ paper - - .. math:: - \mathbf{x}^{\prime}_i = \mathbf{\Theta}_{\textrm{root}} \cdot - \mathbf{x}_i + \sum_{r \in \mathcal{R}} \sum_{j \in \mathcal{N}_r(i)} - \frac{1}{|\mathcal{N}_r(i)|} \mathbf{\Theta}_r \cdot \mathbf{x}_j, - - where :math:`\mathcal{R}` denotes the set of relations, *i.e.* edge types. - Edge type needs to be a one-dimensional :obj:`torch.long` tensor which - stores a relation identifier - :math:`\in \{ 0, \ldots, |\mathcal{R}| - 1\}` for each edge. - - .. note:: - This implementation is as memory-efficient as possible by iterating - over each individual relation type. - Therefore, it may result in low GPU utilization in case the graph has a - large number of relations. - As an alternative approach, :class:`FastRGCNConv` does not iterate over - each individual type, but may consume a large amount of memory to - compensate. - We advise to check out both implementations to see which one fits your - needs. - - .. note:: - :class:`RGCNConv` can use `dynamic shapes - `_, which means that the shape of the interim - tensors can be determined at runtime. - If your device doesn't support dynamic shapes, use - :class:`FastRGCNConv` instead. - - Args: - in_channels (int or tuple): Size of each input sample. A tuple - corresponds to the sizes of source and target dimensionalities. - In case no input features are given, this argument should - correspond to the number of nodes in your graph. - out_channels (int): Size of each output sample. - num_relations (int): Number of relations. - num_bases (int, optional): If set, this layer will use the - basis-decomposition regularization scheme where :obj:`num_bases` - denotes the number of bases to use. (default: :obj:`None`) - num_blocks (int, optional): If set, this layer will use the - block-diagonal-decomposition regularization scheme where - :obj:`num_blocks` denotes the number of blocks to use. - (default: :obj:`None`) - aggr (str, optional): The aggregation scheme to use - (:obj:`"add"`, :obj:`"mean"`, :obj:`"max"`). - (default: :obj:`"mean"`) - root_weight (bool, optional): If set to :obj:`False`, the layer will - not add transformed root node features to the output. - (default: :obj:`True`) - is_sorted (bool, optional): If set to :obj:`True`, assumes that - :obj:`edge_index` is sorted by :obj:`edge_type`. This avoids - internal re-sorting of the data and can improve runtime and memory - efficiency. (default: :obj:`False`) - bias (bool, optional): If set to :obj:`False`, the layer will not learn - an additive bias. (default: :obj:`True`) - **kwargs (optional): Additional arguments of - :class:`torch_geometric.nn.conv.MessagePassing`. 
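`masked_edge_index` above simply selects the columns of `edge_index` whose relation id matches, and `RGCNConv.forward` calls it once per relation. A minimal demonstration of the dense-tensor branch:

```
import torch

edge_index = torch.tensor([[0, 1, 2, 3],
                           [1, 2, 3, 0]])
edge_type = torch.tensor([0, 1, 0, 1])  # relation id per edge

num_relations = 2
for i in range(num_relations):
    mask = edge_type == i
    sub_edge_index = edge_index[:, mask]  # edges of relation i only
    print(i, sub_edge_index.tolist())
# 0 [[0, 2], [1, 3]]
# 1 [[1, 3], [2, 0]]
```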
- """ - def __init__( - self, - in_channels: Union[int, Tuple[int, int]], - out_channels: int, - num_relations: int, - num_bases: Optional[int] = None, - num_blocks: Optional[int] = None, - aggr: str = 'mean', - root_weight: bool = True, - is_sorted: bool = False, - bias: bool = True, - **kwargs, - ): - kwargs.setdefault('aggr', aggr) - super().__init__(node_dim=0, **kwargs) - - if num_bases is not None and num_blocks is not None: - raise ValueError('Can not apply both basis-decomposition and ' - 'block-diagonal-decomposition at the same time.') - - self.in_channels = in_channels - self.out_channels = out_channels - self.num_relations = num_relations - self.num_bases = num_bases - self.num_blocks = num_blocks - self.is_sorted = is_sorted - - if isinstance(in_channels, int): - in_channels = (in_channels, in_channels) - self.in_channels_l = in_channels[0] - - if num_bases is not None: - self.weight = Parameter( - torch.Tensor(num_bases, in_channels[0], out_channels)) - self.comp = Parameter(torch.Tensor(num_relations, num_bases)) - - elif num_blocks is not None: - assert (in_channels[0] % num_blocks == 0 - and out_channels % num_blocks == 0) - self.weight = Parameter( - torch.Tensor(num_relations, num_blocks, - in_channels[0] // num_blocks, - out_channels // num_blocks)) - self.register_parameter('comp', None) - - else: - self.weight = Parameter( - torch.Tensor(num_relations, in_channels[0], out_channels)) - self.register_parameter('comp', None) - - if root_weight: - self.root = Param(torch.Tensor(in_channels[1], out_channels)) - else: - self.register_parameter('root', None) - - if bias: - self.bias = Param(torch.Tensor(out_channels)) - else: - self.register_parameter('bias', None) - - self.reset_parameters() - - def reset_parameters(self): - super().reset_parameters() - glorot(self.weight) - glorot(self.comp) - glorot(self.root) - zeros(self.bias) - - def forward(self, x: Union[OptTensor, Tuple[OptTensor, Tensor]], - edge_index: Adj, edge_type: OptTensor = None): - r"""Runs the forward pass of the module. - - Args: - x (torch.Tensor or tuple, optional): The input node features. - Can be either a :obj:`[num_nodes, in_channels]` node feature - matrix, or an optional one-dimensional node index tensor (in - which case input features are treated as trainable node - embeddings). - Furthermore, :obj:`x` can be of type :obj:`tuple` denoting - source and destination node features. - edge_index (torch.Tensor or SparseTensor): The edge indices. - edge_type (torch.Tensor, optional): The one-dimensional relation - type/index for each edge in :obj:`edge_index`. - Should be only :obj:`None` in case :obj:`edge_index` is of type - :class:`torch_sparse.SparseTensor`. (default: :obj:`None`) - """ - # Convert input features to a pair of node features or node indices. 
- x_l: OptTensor = None - if isinstance(x, tuple): - x_l = x[0] - else: - x_l = x - if x_l is None: - x_l = torch.arange(self.in_channels_l, device=self.weight.device) - - x_r: Tensor = x_l - if isinstance(x, tuple): - x_r = x[1] - - size = (x_l.size(0), x_r.size(0)) - - if isinstance(edge_index, SparseTensor): - edge_type = edge_index.storage.value() - assert edge_type is not None - - # propagate_type: (x: Tensor, edge_type_ptr: OptTensor) - out = torch.zeros(x_r.size(0), self.out_channels, device=x_r.device) - - weight = self.weight - if self.num_bases is not None: # Basis-decomposition ================= - weight = (self.comp @ weight.view(self.num_bases, -1)).view( - self.num_relations, self.in_channels_l, self.out_channels) - - if self.num_blocks is not None: # Block-diagonal-decomposition ===== - - if not torch.is_floating_point( - x_r) and self.num_blocks is not None: - raise ValueError('Block-diagonal decomposition not supported ' - 'for non-continuous input features.') - - for i in range(self.num_relations): - tmp = masked_edge_index(edge_index, edge_type == i) - h = self.propagate(tmp, x=x_l, edge_type_ptr=None, size=size) - h = h.view(-1, weight.size(1), weight.size(2)) - h = torch.einsum('abc,bcd->abd', h, weight[i]) - out = out + h.contiguous().view(-1, self.out_channels) - - else: # No regularization/Basis-decomposition ======================== - if (torch_geometric.typing.WITH_PYG_LIB and self.num_bases is None - and x_l.is_floating_point() - and isinstance(edge_index, Tensor)): - if not self.is_sorted: - if (edge_type[1:] < edge_type[:-1]).any(): - edge_type, perm = index_sort( - edge_type, max_value=self.num_relations) - edge_index = edge_index[:, perm] - edge_type_ptr = index2ptr(edge_type, self.num_relations) - out = self.propagate(edge_index, x=x_l, - edge_type_ptr=edge_type_ptr, size=size) - else: - for i in range(self.num_relations): - tmp = masked_edge_index(edge_index, edge_type == i) - - if not torch.is_floating_point(x_r): - out = out + self.propagate( - tmp, - x=weight[i, x_l], - edge_type_ptr=None, - size=size, - ) - else: - h = self.propagate(tmp, x=x_l, edge_type_ptr=None, - size=size) - out = out + (h @ weight[i]) - - root = self.root - if root is not None: - if not torch.is_floating_point(x_r): - out = out + root[x_r] - else: - out = out + x_r @ root - - if self.bias is not None: - out = out + self.bias - - return out - - def message(self, x_j: Tensor, edge_type_ptr: OptTensor) -> Tensor: - if torch_geometric.typing.WITH_PYG_LIB and edge_type_ptr is not None: - # TODO Re-weight according to edge type degree for `aggr=mean`. - return pyg_lib.ops.segment_matmul(x_j, edge_type_ptr, self.weight) - - return x_j - - def message_and_aggregate(self, adj_t: SparseTensor, x: Tensor) -> Tensor: - adj_t = adj_t.set_value(None) - return spmm(adj_t, x, reduce=self.aggr) - - def __repr__(self) -> str: - return (f'{self.__class__.__name__}({self.in_channels}, ' - f'{self.out_channels}, num_relations={self.num_relations})') - - -class FastRGCNConv(RGCNConv): - r"""See :class:`RGCNConv`.""" - def forward(self, x: Union[OptTensor, Tuple[OptTensor, Tensor]], - edge_index: Adj, edge_type: OptTensor = None): - - self.fuse = False - assert self.aggr in ['add', 'sum', 'mean'] - - # Convert input features to a pair of node features or node indices. 
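Basis decomposition, used above when `num_bases` is set, expresses every relation's weight as a coefficient mixture of a small set of shared bases; that is exactly the `(self.comp @ weight.view(num_bases, -1)).view(...)` line. A small numeric sketch of the construction and the resulting parameter saving:

```
import torch

num_relations, num_bases = 6, 2
in_ch, out_ch = 4, 3

basis = torch.randn(num_bases, in_ch, out_ch)  # shared bases
comp = torch.randn(num_relations, num_bases)   # per-relation coefficients

# Each relation's weight is a linear combination of the bases:
weight = (comp @ basis.view(num_bases, -1)).view(num_relations, in_ch, out_ch)
print(weight.shape)  # torch.Size([6, 4, 3])

# weight[r] == sum_b comp[r, b] * basis[b]:
print(torch.allclose(weight[0],
                     sum(comp[0, b] * basis[b] for b in range(num_bases))))

# Parameters: 6*4*3 = 72 unshared vs. 2*4*3 + 6*2 = 36 with two bases.
```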
- x_l: OptTensor = None - if isinstance(x, tuple): - x_l = x[0] - else: - x_l = x - if x_l is None: - x_l = torch.arange(self.in_channels_l, device=self.weight.device) - - x_r: Tensor = x_l - if isinstance(x, tuple): - x_r = x[1] - - size = (x_l.size(0), x_r.size(0)) - - # propagate_type: (x: Tensor, edge_type: OptTensor) - out = self.propagate(edge_index, x=x_l, edge_type=edge_type, size=size) - - root = self.root - if root is not None: - if not torch.is_floating_point(x_r): - out = out + root[x_r] - else: - out = out + x_r @ root - - if self.bias is not None: - out = out + self.bias - - return out - - def message(self, x_j: Tensor, edge_type: Tensor, - edge_index_j: Tensor) -> Tensor: - weight = self.weight - if self.num_bases is not None: # Basis-decomposition ================= - weight = (self.comp @ weight.view(self.num_bases, -1)).view( - self.num_relations, self.in_channels_l, self.out_channels) - - if self.num_blocks is not None: # Block-diagonal-decomposition ======= - if not torch.is_floating_point(x_j): - raise ValueError('Block-diagonal decomposition not supported ' - 'for non-continuous input features.') - - weight = weight[edge_type].view(-1, weight.size(2), weight.size(3)) - x_j = x_j.view(-1, 1, weight.size(1)) - return torch.bmm(x_j, weight).view(-1, self.out_channels) - - else: # No regularization/Basis-decomposition ======================== - if not torch.is_floating_point(x_j): - weight_index = edge_type * weight.size(1) + edge_index_j - return weight.view(-1, self.out_channels)[weight_index] - - return torch.bmm(x_j.unsqueeze(-2), weight[edge_type]).squeeze(-2) - - def aggregate(self, inputs: Tensor, edge_type: Tensor, index: Tensor, - dim_size: Optional[int] = None) -> Tensor: - - # Compute normalization in separation for each `edge_type`. - if self.aggr == 'mean': - norm = one_hot(edge_type, self.num_relations, dtype=inputs.dtype) - norm = scatter(norm, index, dim=0, dim_size=dim_size)[index] - norm = torch.gather(norm, 1, edge_type.view(-1, 1)) - norm = 1. / norm.clamp_(1.) 
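The block-diagonal branch of `message` above gathers each edge's relation-specific stack of blocks and applies it with a single batched matmul. A shape-level sketch, with toy sizes chosen so that `in_channels` and `out_channels` are divisible by `num_blocks`:

```
import torch

num_relations, num_blocks = 3, 2
in_ch, out_ch = 8, 6
bin_, bout = in_ch // num_blocks, out_ch // num_blocks  # 4, 3

# One [num_blocks, 4, 3] block stack per relation:
weight = torch.randn(num_relations, num_blocks, bin_, bout)

E = 5
x_j = torch.randn(E, in_ch)                  # source features per edge
edge_type = torch.randint(0, num_relations, (E,))

# Gather each edge's blocks, then multiply block-wise:
w = weight[edge_type].view(-1, bin_, bout)   # [E * num_blocks, 4, 3]
h = x_j.view(-1, 1, bin_)                    # [E * num_blocks, 1, 4]
out = torch.bmm(h, w).view(E, out_ch)        # [E, 6]
print(out.shape)
```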
- inputs = norm * inputs - - return scatter(inputs, index, dim=self.node_dim, dim_size=dim_size) diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/conv/utils/cheatsheet.py b/pytorch_geometric-2.3.1/torch_geometric/nn/conv/utils/cheatsheet.py deleted file mode 100644 index 03f2d0e..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/conv/utils/cheatsheet.py +++ /dev/null @@ -1,70 +0,0 @@ -import importlib -import inspect -import re - - -def paper_title(cls: str) -> str: - cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls] - match = re.search('`\".+?\"', inspect.getdoc(cls), flags=re.DOTALL) - return None if match is None else match.group().replace('\n', ' ')[2:-1] - - -def paper_link(cls: str) -> str: - cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls] - match = re.search('<.+?>', inspect.getdoc(cls), flags=re.DOTALL) - return None if match is None else match.group().replace('\n', ' ')[1:-1] - - -def supports_sparse_tensor(cls: str) -> bool: - cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls] - signature = inspect.signature(cls.forward) - return 'SparseTensor' in str(signature) - - -def supports_edge_weights(cls: str) -> bool: - cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls] - signature = inspect.signature(cls.forward) - return 'edge_weight' in str(signature) - - -def supports_edge_features(cls: str) -> bool: - cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls] - signature = inspect.signature(cls.forward) - return 'edge_attr' in str(signature) - - -def supports_bipartite_graphs(cls: str) -> bool: - cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls] - signature = inspect.signature(cls.forward) - return 'Union[torch.Tensor, Tuple[torch.Tensor' in str(signature) - - -def supports_static_graphs(cls: str) -> bool: - cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls] - return 'node_dim=' not in inspect.getsource(cls.__init__) - - -def supports_lazy_initialization(cls: str) -> bool: - cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls] - doc = re.sub(' +', ' ', inspect.getdoc(cls).replace('\n', ' ')) - match = re.search('or :obj:`-1` to derive the size from the first', doc) - return match is not None - - -def processes_heterogeneous_graphs(cls: str) -> bool: - cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls] - signature = inspect.signature(cls.forward) - return 'edge_index_dict' in str(signature) or 'edge_type' in str(signature) - - -def processes_hypergraphs(cls: str) -> bool: - cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls] - signature = inspect.signature(cls.forward) - return 'hyperedge_index' in str(signature) - - -def processes_point_clouds(cls: str) -> bool: - cls = importlib.import_module('torch_geometric.nn.conv').__dict__[cls] - signature = inspect.signature(cls.forward) - return (('edge_index' not in str(signature) - and 'csc' not in str(signature)) or 'pos' in str(signature)) diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/conv/utils/typing.py b/pytorch_geometric-2.3.1/torch_geometric/nn/conv/utils/typing.py deleted file mode 100644 index 25fdfdd..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/conv/utils/typing.py +++ /dev/null @@ -1,108 +0,0 @@ -import inspect -import re -from collections import OrderedDict -from itertools import product -from typing import Callable, Dict, List, Tuple - -import pyparsing as pp - - -def split_types_repr(types_repr: 
str) -> List[str]: - out = [] - i = depth = 0 - for j, char in enumerate(types_repr): - if char == '[': - depth += 1 - elif char == ']': - depth -= 1 - elif char == ',' and depth == 0: - out.append(types_repr[i:j].strip()) - i = j + 1 - out.append(types_repr[i:].strip()) - return out - - - def sanitize(type_repr: str): - type_repr = re.sub(r'<class \'(.*)\'>', r'\1', type_repr) - type_repr = type_repr.replace('typing.', '') - type_repr = type_repr.replace('torch_sparse.tensor.', '') - type_repr = type_repr.replace('Adj', 'Union[Tensor, SparseTensor]') - - # Replace `Union[..., NoneType]` by `Optional[...]`. - sexp = pp.nestedExpr(opener='[', closer=']') - tree = sexp.parseString(f'[{type_repr.replace(",", " ")}]').asList()[0] - - def union_to_optional_(tree): - for i in range(len(tree)): - e, n = tree[i], tree[i + 1] if i + 1 < len(tree) else [] - if e == 'Union' and n[-1] == 'NoneType': - tree[i] = 'Optional' - tree[i + 1] = tree[i + 1][:-1] - elif e == 'Union' and 'NoneType' in n: - idx = n.index('NoneType') - n[idx] = [n[idx - 1]] - n[idx - 1] = 'Optional' - elif isinstance(e, list): - tree[i] = union_to_optional_(e) - return tree - - tree = union_to_optional_(tree) - type_repr = re.sub(r'\'|\"', '', str(tree)[1:-1]).replace(', [', '[') - - return type_repr - - - def param_type_repr(param) -> str: - if param.annotation is inspect.Parameter.empty: - return 'torch.Tensor' - return sanitize(re.split(r':|='.strip(), str(param))[1]) - - - def return_type_repr(signature) -> str: - return_type = signature.return_annotation - if return_type is inspect.Parameter.empty: - return 'torch.Tensor' - elif str(return_type)[:6] != '<class': - return sanitize(str(return_type)) - elif return_type.__module__ == 'builtins': - return return_type.__name__ - else: - return f'{return_type.__module__}.{return_type.__name__}' - - - def parse_types(func: Callable) -> List[Tuple[Dict[str, str], str]]: - source = inspect.getsource(func) - signature = inspect.signature(func) - - # Parse `# type: (...) -> ...` annotation. Note that it is allowed to pass - # multiple `# type:` annotations in `forward()`. - iterator = re.finditer(r'#\s*type:\s*\((.*)\)\s*->\s*(.*)\s*\n', source) - matches = list(iterator) - - if len(matches) > 0: - out = [] - args = list(signature.parameters.keys()) - for match in matches: - arg_types_repr, return_type = match.groups() - arg_types = split_types_repr(arg_types_repr) - arg_types = OrderedDict((k, v) for k, v in zip(args, arg_types)) - return_type = return_type.split('#')[0].strip() - out.append((arg_types, return_type)) - return out - - # Alternatively, parse annotations using the inspected signature.
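`split_types_repr` above splits a type annotation on top-level commas only, tracking bracket depth so that nested generics stay intact. A standalone re-statement of the idea with an example input:

```
from typing import List


def split_top_level(types_repr: str) -> List[str]:
    # Split on commas only at bracket depth zero, as in `split_types_repr`:
    out, i, depth = [], 0, 0
    for j, char in enumerate(types_repr):
        if char == '[':
            depth += 1
        elif char == ']':
            depth -= 1
        elif char == ',' and depth == 0:
            out.append(types_repr[i:j].strip())
            i = j + 1
    out.append(types_repr[i:].strip())
    return out


print(split_top_level('Tensor, Optional[Tuple[Tensor, Tensor]], int'))
# ['Tensor', 'Optional[Tuple[Tensor, Tensor]]', 'int']
```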
- else: - ps = signature.parameters - arg_types = OrderedDict((k, param_type_repr(v)) for k, v in ps.items()) - return [(arg_types, return_type_repr(signature))] - - -def resolve_types(arg_types: Dict[str, str], - return_type_repr: str) -> List[Tuple[List[str], str]]: - out = [] - for type_repr in arg_types.values(): - if type_repr[:5] == 'Union': - out.append(split_types_repr(type_repr[6:-1])) - else: - out.append([type_repr]) - return [(x, return_type_repr) for x in product(*out)] diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/conv/wl_conv_continuous.py b/pytorch_geometric-2.3.1/torch_geometric/nn/conv/wl_conv_continuous.py deleted file mode 100644 index f749460..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/conv/wl_conv_continuous.py +++ /dev/null @@ -1,65 +0,0 @@ -from typing import Union - -from torch import Tensor - -from torch_geometric.nn.conv import MessagePassing -from torch_geometric.typing import OptPairTensor, OptTensor, Size -from torch_geometric.utils import scatter - - -class WLConvContinuous(MessagePassing): - r"""The Weisfeiler Lehman operator from the `"Wasserstein - Weisfeiler-Lehman Graph Kernels" `_ - paper. Refinement is done though a degree-scaled mean aggregation and - works on nodes with continuous attributes: - - .. math:: - \mathbf{x}^{\prime}_i = \frac{1}{2}\big(\mathbf{x}_i + - \frac{1}{\textrm{deg}(i)} - \sum_{j \in \mathcal{N}(i)} e_{j,i} \cdot \mathbf{x}_j \big) - - where :math:`e_{j,i}` denotes the edge weight from source node :obj:`j` to - target node :obj:`i` (default: :obj:`1`) - - Args: - **kwargs (optional): Additional arguments of - :class:`torch_geometric.nn.conv.MessagePassing`. - - Shapes: - - **input:** - node features :math:`(|\mathcal{V}|, F)` or - :math:`((|\mathcal{V_s}|, F), (|\mathcal{V_t}|, F))` if bipartite, - edge indices :math:`(2, |\mathcal{E}|)`, - edge weights :math:`(|\mathcal{E}|)` *(optional)* - - **output:** node features :math:`(|\mathcal{V}|, F)` or - :math:`(|\mathcal{V}_t|, F)` if bipartite - """ - def __init__(self, **kwargs): - super().__init__(aggr='add', **kwargs) - - def forward(self, x: Union[Tensor, OptPairTensor], edge_index: Tensor, - edge_weight: OptTensor = None, size: Size = None) -> Tensor: - - if isinstance(x, Tensor): - x: OptPairTensor = (x, x) - - if edge_weight is None: - edge_weight = x[0].new_ones(edge_index.size(1)) - - # propagate_type: (x: OptPairTensor, edge_weight: Tensor) - out = self.propagate(edge_index, x=x, edge_weight=edge_weight, - size=size) - - deg = scatter(edge_weight, edge_index[1], 0, out.size(0), reduce='sum') - deg_inv = 1. / deg - deg_inv.masked_fill_(deg_inv == float('inf'), 0) - out = deg_inv.view(-1, 1) * out - - x_dst = x[1] - if x_dst is not None: - out = 0.5 * (x_dst + out) - - return out - - def message(self, x_j: Tensor, edge_weight: Tensor) -> Tensor: - return edge_weight.view(-1, 1) * x_j diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/data_parallel.py b/pytorch_geometric-2.3.1/torch_geometric/nn/data_parallel.py deleted file mode 100644 index b3f55b3..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/data_parallel.py +++ /dev/null @@ -1,93 +0,0 @@ -import logging -from itertools import chain - -import torch - -from torch_geometric.data import Batch - - -class DataParallel(torch.nn.DataParallel): - r"""Implements data parallelism at the module level. 
- - This container parallelizes the application of the given :attr:`module` by - splitting a list of :class:`torch_geometric.data.Data` objects and copying - them as :class:`torch_geometric.data.Batch` objects to each device. - In the forward pass, the module is replicated on each device, and each - replica handles a portion of the input. - During the backwards pass, gradients from each replica are summed into the - original module. - - The batch size should be larger than the number of GPUs used. - - The parallelized :attr:`module` must have its parameters and buffers on - :obj:`device_ids[0]`. - - .. note:: - - You need to use the :class:`torch_geometric.loader.DataListLoader` for - this module. - - Args: - module (Module): Module to be parallelized. - device_ids (list of int or torch.device): CUDA devices. - (default: all devices) - output_device (int or torch.device): Device location of output. - (default: :obj:`device_ids[0]`) - follow_batch (list or tuple, optional): Creates assignment batch - vectors for each key in the list. (default: :obj:`None`) - exclude_keys (list or tuple, optional): Will exclude each key in the - list. (default: :obj:`None`) - """ - def __init__(self, module, device_ids=None, output_device=None, - follow_batch=None, exclude_keys=None): - super().__init__(module, device_ids, output_device) - self.src_device = torch.device(f'cuda:{self.device_ids[0]}') - self.follow_batch = follow_batch or [] - self.exclude_keys = exclude_keys or [] - - def forward(self, data_list): - """""" - if len(data_list) == 0: - logging.warning('DataParallel received an empty data list, which ' - 'may result in unexpected behavior.') - return None - - if not self.device_ids or len(self.device_ids) == 1: # Fallback - data = Batch.from_data_list( - data_list, follow_batch=self.follow_batch, - exclude_keys=self.exclude_keys).to(self.src_device) - return self.module(data) - - for t in chain(self.module.parameters(), self.module.buffers()): - if t.device != self.src_device: - raise RuntimeError( - f"Module must have its parameters and buffers on device " - f"'{self.src_device}' but found one of them on device " - f"'{t.device}'") - - inputs = self.scatter(data_list, self.device_ids) - replicas = self.replicate(self.module, self.device_ids[:len(inputs)]) - outputs = self.parallel_apply(replicas, inputs, None) - return self.gather(outputs, self.output_device) - - def scatter(self, data_list, device_ids): - num_devices = min(len(device_ids), len(data_list)) - - count = torch.tensor([data.num_nodes for data in data_list]) - cumsum = count.cumsum(0) - cumsum = torch.cat([cumsum.new_zeros(1), cumsum], dim=0) - device_id = num_devices * cumsum.to(torch.float) / cumsum[-1].item() - device_id = (device_id[:-1] + device_id[1:]) / 2.0 - device_id = device_id.to(torch.long) # round. 
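The `scatter` logic above balances devices by node count rather than by graph count: each graph's midpoint in the cumulative node range is mapped to a device id, and the `bincount` that follows turns those ids into split points. The arithmetic in isolation, with hypothetical per-graph node counts:

```
import torch

# Hypothetical node counts for a batch of 6 graphs on 2 devices:
count = torch.tensor([10, 40, 5, 25, 30, 10])
num_devices = 2

cumsum = count.cumsum(0)
cumsum = torch.cat([cumsum.new_zeros(1), cumsum], dim=0)

# Scale cumulative node positions into [0, num_devices], then take each
# graph's midpoint and truncate, so devices receive roughly equal numbers
# of *nodes*, not graphs:
device_id = num_devices * cumsum.to(torch.float) / cumsum[-1].item()
device_id = (device_id[:-1] + device_id[1:]) / 2.0
device_id = device_id.to(torch.long)
print(device_id.tolist())  # [0, 0, 0, 1, 1, 1]: 55 vs. 65 nodes
```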
- split = device_id.bincount().cumsum(0) - split = torch.cat([split.new_zeros(1), split], dim=0) - split = torch.unique(split, sorted=True) - split = split.tolist() - - return [ - Batch.from_data_list(data_list[split[i]:split[i + 1]], - follow_batch=self.follow_batch, - exclude_keys=self.exclude_keys).to( - torch.device(f'cuda:{device_ids[i]}')) - for i in range(len(split) - 1) - ] diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/dense/linear.py b/pytorch_geometric-2.3.1/torch_geometric/nn/dense/linear.py deleted file mode 100644 index c8971e1..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/dense/linear.py +++ /dev/null @@ -1,405 +0,0 @@ -import copy -import math -from typing import Any, Dict, Optional, Union - -import torch -import torch.nn.functional as F -from torch import Tensor, nn -from torch.nn.parameter import Parameter - -import torch_geometric.typing -from torch_geometric.nn import inits -from torch_geometric.typing import pyg_lib -from torch_geometric.utils import index_sort -from torch_geometric.utils.sparse import index2ptr - - -def is_uninitialized_parameter(x: Any) -> bool: - if not hasattr(nn.parameter, 'UninitializedParameter'): - return False - return isinstance(x, nn.parameter.UninitializedParameter) - - -def reset_weight_(weight: Tensor, in_channels: int, - initializer: Optional[str] = None) -> Tensor: - if in_channels <= 0: - pass - elif initializer == 'glorot': - inits.glorot(weight) - elif initializer == 'uniform': - bound = 1.0 / math.sqrt(in_channels) - torch.nn.init.uniform_(weight.data, -bound, bound) - elif initializer == 'kaiming_uniform': - inits.kaiming_uniform(weight, fan=in_channels, a=math.sqrt(5)) - elif initializer is None: - inits.kaiming_uniform(weight, fan=in_channels, a=math.sqrt(5)) - else: - raise RuntimeError(f"Weight initializer '{initializer}' not supported") - - return weight - - -def reset_bias_(bias: Optional[Tensor], in_channels: int, - initializer: Optional[str] = None) -> Optional[Tensor]: - if bias is None or in_channels <= 0: - pass - elif initializer == 'zeros': - inits.zeros(bias) - elif initializer is None: - inits.uniform(in_channels, bias) - else: - raise RuntimeError(f"Bias initializer '{initializer}' not supported") - - return bias - - -class Linear(torch.nn.Module): - r"""Applies a linear tranformation to the incoming data - - .. math:: - \mathbf{x}^{\prime} = \mathbf{x} \mathbf{W}^{\top} + \mathbf{b} - - similar to :class:`torch.nn.Linear`. - It supports lazy initialization and customizable weight and bias - initialization. - - Args: - in_channels (int): Size of each input sample. Will be initialized - lazily in case it is given as :obj:`-1`. - out_channels (int): Size of each output sample. - bias (bool, optional): If set to :obj:`False`, the layer will not learn - an additive bias. (default: :obj:`True`) - weight_initializer (str, optional): The initializer for the weight - matrix (:obj:`"glorot"`, :obj:`"uniform"`, :obj:`"kaiming_uniform"` - or :obj:`None`). - If set to :obj:`None`, will match default weight initialization of - :class:`torch.nn.Linear`. (default: :obj:`None`) - bias_initializer (str, optional): The initializer for the bias vector - (:obj:`"zeros"` or :obj:`None`). - If set to :obj:`None`, will match default bias initialization of - :class:`torch.nn.Linear`. 
(default: :obj:`None`) - - Shapes: - - **input:** features :math:`(*, F_{in})` - - **output:** features :math:`(*, F_{out})` - """ - def __init__(self, in_channels: int, out_channels: int, bias: bool = True, - weight_initializer: Optional[str] = None, - bias_initializer: Optional[str] = None): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.weight_initializer = weight_initializer - self.bias_initializer = bias_initializer - - if in_channels > 0: - self.weight = Parameter(torch.Tensor(out_channels, in_channels)) - else: - self.weight = nn.parameter.UninitializedParameter() - self._hook = self.register_forward_pre_hook( - self.initialize_parameters) - - if bias: - self.bias = Parameter(torch.Tensor(out_channels)) - else: - self.register_parameter('bias', None) - - self._load_hook = self._register_load_state_dict_pre_hook( - self._lazy_load_hook) - - self.reset_parameters() - - def __deepcopy__(self, memo): - out = Linear(self.in_channels, self.out_channels, self.bias - is not None, self.weight_initializer, - self.bias_initializer) - if self.in_channels > 0: - out.weight = copy.deepcopy(self.weight, memo) - if self.bias is not None: - out.bias = copy.deepcopy(self.bias, memo) - return out - - def reset_parameters(self): - r"""Resets all learnable parameters of the module.""" - reset_weight_(self.weight, self.in_channels, self.weight_initializer) - reset_bias_(self.bias, self.in_channels, self.bias_initializer) - - def forward(self, x: Tensor) -> Tensor: - r""" - Args: - x (torch.Tensor): The input features. - """ - return F.linear(x, self.weight, self.bias) - - @torch.no_grad() - def initialize_parameters(self, module, input): - if is_uninitialized_parameter(self.weight): - self.in_channels = input[0].size(-1) - self.weight.materialize((self.out_channels, self.in_channels)) - self.reset_parameters() - self._hook.remove() - delattr(self, '_hook') - - def _save_to_state_dict(self, destination, prefix, keep_vars): - if (is_uninitialized_parameter(self.weight) - or torch.onnx.is_in_onnx_export()): - destination[prefix + 'weight'] = self.weight - else: - destination[prefix + 'weight'] = self.weight.detach() - if self.bias is not None: - if torch.onnx.is_in_onnx_export(): - destination[prefix + 'bias'] = self.bias - else: - destination[prefix + 'bias'] = self.bias.detach() - - def _lazy_load_hook(self, state_dict, prefix, local_metadata, strict, - missing_keys, unexpected_keys, error_msgs): - - weight = state_dict.get(prefix + 'weight', None) - - if weight is not None and is_uninitialized_parameter(weight): - self.in_channels = -1 - self.weight = nn.parameter.UninitializedParameter() - if not hasattr(self, '_hook'): - self._hook = self.register_forward_pre_hook( - self.initialize_parameters) - - elif weight is not None and is_uninitialized_parameter(self.weight): - self.in_channels = weight.size(-1) - self.weight.materialize((self.out_channels, self.in_channels)) - if hasattr(self, '_hook'): - self._hook.remove() - delattr(self, '_hook') - - def __repr__(self) -> str: - return (f'{self.__class__.__name__}({self.in_channels}, ' - f'{self.out_channels}, bias={self.bias is not None})') - - -class HeteroLinear(torch.nn.Module): - r"""Applies separate linear tranformations to the incoming data according - to types - - .. math:: - \mathbf{x}^{\prime}_{\kappa} = \mathbf{x}_{\kappa} - \mathbf{W}^{\top}_{\kappa} + \mathbf{b}_{\kappa} - - for type :math:`\kappa`. - It supports lazy initialization and customizable weight and bias - initialization. 
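A usage sketch of the lazy initialization machinery above: with `in_channels=-1`, the weight stays an `UninitializedParameter` until the first forward pass materializes it from the input width via the registered pre-hook (assumes `torch_geometric` is installed):

```
import torch
from torch_geometric.nn import Linear

lin = Linear(-1, 32, weight_initializer='glorot')  # lazy input size
print(lin)               # Linear(-1, 32, bias=True)

x = torch.randn(7, 20)
out = lin(x)             # first forward materializes the weight
print(lin.weight.shape)  # torch.Size([32, 20])
print(out.shape)         # torch.Size([7, 32])
```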
- - Args: - in_channels (int): Size of each input sample. Will be initialized - lazily in case it is given as :obj:`-1`. - out_channels (int): Size of each output sample. - num_types (int): The number of types. - is_sorted (bool, optional): If set to :obj:`True`, assumes that - :obj:`type_vec` is sorted. This avoids internal re-sorting of the - data and can improve runtime and memory efficiency. - (default: :obj:`False`) - **kwargs (optional): Additional arguments of - :class:`torch_geometric.nn.Linear`. - - Shapes: - - **input:** - features :math:`(*, F_{in})`, - type vector :math:`(*)` - - **output:** features :math:`(*, F_{out})` - """ - def __init__(self, in_channels: int, out_channels: int, num_types: int, - is_sorted: bool = False, **kwargs): - super().__init__() - - self.in_channels = in_channels - self.out_channels = out_channels - self.num_types = num_types - self.is_sorted = is_sorted - self.kwargs = kwargs - - if torch_geometric.typing.WITH_PYG_LIB: - self.lins = None - if self.in_channels == -1: - self.weight = nn.parameter.UninitializedParameter() - self._hook = self.register_forward_pre_hook( - self.initialize_parameters) - else: - self.weight = torch.nn.Parameter( - torch.Tensor(num_types, in_channels, out_channels)) - if kwargs.get('bias', True): - self.bias = Parameter(torch.Tensor(num_types, out_channels)) - else: - self.register_parameter('bias', None) - else: - self.lins = torch.nn.ModuleList([ - Linear(in_channels, out_channels, **kwargs) - for _ in range(num_types) - ]) - self.register_parameter('weight', None) - self.register_parameter('bias', None) - - self.reset_parameters() - - def reset_parameters(self): - r"""Resets all learnable parameters of the module.""" - if torch_geometric.typing.WITH_PYG_LIB: - reset_weight_(self.weight, self.in_channels, - self.kwargs.get('weight_initializer', None)) - reset_weight_(self.bias, self.in_channels, - self.kwargs.get('bias_initializer', None)) - else: - for lin in self.lins: - lin.reset_parameters() - - def forward(self, x: Tensor, type_vec: Tensor) -> Tensor: - r""" - Args: - x (torch.Tensor): The input features. - type_vec (torch.Tensor): A vector that maps each entry to a type. - """ - if torch_geometric.typing.WITH_PYG_LIB: - assert self.weight is not None - - perm: Optional[Tensor] = None - if not self.is_sorted: - if (type_vec[1:] < type_vec[:-1]).any(): - type_vec, perm = index_sort(type_vec, self.num_types) - x = x[perm] - - type_vec_ptr = index2ptr(type_vec, self.num_types) - out = pyg_lib.ops.segment_matmul(x, type_vec_ptr, self.weight) - if self.bias is not None: - out += self.bias[type_vec] - - if perm is not None: # Restore original order (if necessary). 
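`HeteroLinear` applies one weight/bias pair per type; the `segment_matmul` fast path above and the per-type `lins` fallback that follows compute the same thing. A plain-PyTorch sketch of those reference semantics, plus a dense-gather equivalent that replaces the Python loop with one `bmm`:

```
import torch

num_types, in_ch, out_ch = 3, 4, 2
weight = torch.randn(num_types, in_ch, out_ch)
bias = torch.randn(num_types, out_ch)

x = torch.randn(6, in_ch)
type_vec = torch.tensor([0, 1, 0, 2, 1, 2])  # type id per row

# Reference semantics: each row is transformed by its own type's weights
# (this is what segment_matmul computes in one fused kernel):
out = torch.empty(x.size(0), out_ch)
for i in range(num_types):
    mask = type_vec == i
    out[mask] = x[mask] @ weight[i] + bias[i]

# Dense-gather equivalent:
out2 = torch.bmm(x.unsqueeze(1), weight[type_vec]).squeeze(1) + bias[type_vec]
print(torch.allclose(out, out2, atol=1e-6))  # True
```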
- out_unsorted = torch.empty_like(out) - out_unsorted[perm] = out - out = out_unsorted - else: - assert self.lins is not None - out = x.new_empty(x.size(0), self.out_channels) - for i, lin in enumerate(self.lins): - mask = type_vec == i - out[mask] = lin(x[mask]) - return out - - @torch.no_grad() - def initialize_parameters(self, module, input): - if is_uninitialized_parameter(self.weight): - self.in_channels = input[0].size(-1) - self.weight.materialize( - (self.num_types, self.in_channels, self.out_channels)) - self.reset_parameters() - self._hook.remove() - delattr(self, '_hook') - - def __repr__(self) -> str: - return (f'{self.__class__.__name__}({self.in_channels}, ' - f'{self.out_channels}, num_types={self.num_types}, ' - f'bias={self.kwargs.get("bias", True)})') - - -class HeteroDictLinear(torch.nn.Module): - r"""Applies separate linear tranformations to the incoming data dictionary - - .. math:: - \mathbf{x}^{\prime}_{\kappa} = \mathbf{x}_{\kappa} - \mathbf{W}^{\top}_{\kappa} + \mathbf{b}_{\kappa} - - for key :math:`\kappa`. - It supports lazy initialization and customizable weight and bias - initialization. - - Args: - in_channels (int or Dict[Any, int]): Size of each input sample. If - passed an integer, :obj:`types` will be a mandatory argument. - initialized lazily in case it is given as :obj:`-1`. - out_channels (int): Size of each output sample. - types (List[Any], optional): The keys of the input dictionary. - (default: :obj:`None`) - **kwargs (optional): Additional arguments of - :class:`torch_geometric.nn.Linear`. - """ - def __init__( - self, - in_channels: Union[int, Dict[Any, int]], - out_channels: int, - types: Optional[Any] = None, - **kwargs, - ): - super().__init__() - - if isinstance(in_channels, dict): - self.types = list(in_channels.keys()) - - if any([i == -1 for i in in_channels.values()]): - self._hook = self.register_forward_pre_hook( - self.initialize_parameters) - - if types is not None and set(self.types) != set(types): - raise ValueError("The provided 'types' do not match with the " - "keys in the 'in_channels' dictionary") - - else: - if types is None: - raise ValueError("Please provide a list of 'types' if passing " - "'in_channels' as an integer") - - if in_channels == -1: - self._hook = self.register_forward_pre_hook( - self.initialize_parameters) - - self.types = types - in_channels = {node_type: in_channels for node_type in types} - - self.in_channels = in_channels - self.out_channels = out_channels - self.kwargs = kwargs - - self.lins = torch.nn.ModuleDict({ - key: Linear(channels, self.out_channels, **kwargs) - for key, channels in self.in_channels.items() - }) - - self.reset_parameters() - - def reset_parameters(self): - r"""Resets all learnable parameters of the module.""" - for lin in self.lins.values(): - lin.reset_parameters() - - def forward( - self, - x_dict: Dict[Any, Tensor], - ) -> Dict[Any, Tensor]: - r""" - Args: - x_dict (Dict[Any, torch.Tensor]): A dictionary holding input - features for each individual type. 
- """ - if torch_geometric.typing.WITH_GMM: - xs = [x_dict[key] for key in x_dict.keys()] - weights = [self.lins[key].weight.t() for key in x_dict.keys()] - if self.kwargs.get('bias', True): - biases = [self.lins[key].bias for key in x_dict.keys()] - else: - biases = None - - outs = pyg_lib.ops.grouped_matmul(xs, weights, biases) - return {key: out for key, out in zip(x_dict.keys(), outs)} - - return {key: self.lins[key](x) for key, x in x_dict.items()} - - @torch.no_grad() - def initialize_parameters(self, module, input): - for key, x in input[0].items(): - lin = self.lins[key] - if is_uninitialized_parameter(lin.weight): - self.lins[key].initialize_parameters(None, x) - self.reset_parameters() - self._hook.remove() - self.in_channels = {key: x.size(-1) for key, x in input[0].items()} - delattr(self, '_hook') - - def __repr__(self) -> str: - return (f'{self.__class__.__name__}({self.in_channels}, ' - f'{self.out_channels}, bias={self.kwargs.get("bias", True)})') diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/kge/__init__.py b/pytorch_geometric-2.3.1/torch_geometric/nn/kge/__init__.py deleted file mode 100644 index 35e2852..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/kge/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -from .base import KGEModel -from .transe import TransE -from .complex import ComplEx -from .distmult import DistMult - -__all__ = classes = [ - 'KGEModel', - 'TransE', - 'ComplEx', - 'DistMult', -] diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/models/__init__.py b/pytorch_geometric-2.3.1/torch_geometric/nn/models/__init__.py deleted file mode 100644 index 46b9fd2..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/models/__init__.py +++ /dev/null @@ -1,65 +0,0 @@ -from .mlp import MLP -from .basic_gnn import GCN, GraphSAGE, GIN, GAT, PNA, EdgeCNN -from .jumping_knowledge import JumpingKnowledge -from .meta import MetaLayer -from .node2vec import Node2Vec -from .deep_graph_infomax import DeepGraphInfomax -from .autoencoder import InnerProductDecoder, GAE, VGAE, ARGA, ARGVA -from .signed_gcn import SignedGCN -from .re_net import RENet -from .graph_unet import GraphUNet -from .schnet import SchNet -from .dimenet import DimeNet, DimeNetPlusPlus -from .captum import to_captum_model, to_captum_input, captum_output_to_dicts -from .metapath2vec import MetaPath2Vec -from .deepgcn import DeepGCNLayer -from .tgn import TGNMemory -from .label_prop import LabelPropagation -from .correct_and_smooth import CorrectAndSmooth -from .attentive_fp import AttentiveFP -from .rect import RECT_L -from .linkx import LINKX -from .lightgcn import LightGCN -from .mask_label import MaskLabel -from .rev_gnn import GroupAddRev -from .gnnff import GNNFF - -__all__ = classes = [ - 'MLP', - 'GCN', - 'GraphSAGE', - 'GIN', - 'GAT', - 'PNA', - 'EdgeCNN', - 'JumpingKnowledge', - 'MetaLayer', - 'Node2Vec', - 'DeepGraphInfomax', - 'InnerProductDecoder', - 'GAE', - 'VGAE', - 'ARGA', - 'ARGVA', - 'SignedGCN', - 'RENet', - 'GraphUNet', - 'SchNet', - 'DimeNet', - 'DimeNetPlusPlus', - 'to_captum_model', - 'to_captum_input', - 'captum_output_to_dicts', - 'MetaPath2Vec', - 'DeepGCNLayer', - 'TGNMemory', - 'LabelPropagation', - 'CorrectAndSmooth', - 'AttentiveFP', - 'RECT_L', - 'LINKX', - 'LightGCN', - 'MaskLabel', - 'GroupAddRev', - 'GNNFF', -] diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/models/attentive_fp.py b/pytorch_geometric-2.3.1/torch_geometric/nn/models/attentive_fp.py deleted file mode 100644 index 9487806..0000000 --- 
a/pytorch_geometric-2.3.1/torch_geometric/nn/models/attentive_fp.py +++ /dev/null @@ -1,180 +0,0 @@ -from typing import Optional - -import torch -import torch.nn.functional as F -from torch import Tensor -from torch.nn import GRUCell, Linear, Parameter - -from torch_geometric.nn import GATConv, MessagePassing, global_add_pool -from torch_geometric.nn.inits import glorot, zeros -from torch_geometric.typing import Adj, OptTensor -from torch_geometric.utils import softmax - - -class GATEConv(MessagePassing): - def __init__( - self, - in_channels: int, - out_channels: int, - edge_dim: int, - dropout: float = 0.0, - ): - super().__init__(aggr='add', node_dim=0) - - self.dropout = dropout - - self.att_l = Parameter(torch.Tensor(1, out_channels)) - self.att_r = Parameter(torch.Tensor(1, in_channels)) - - self.lin1 = Linear(in_channels + edge_dim, out_channels, False) - self.lin2 = Linear(out_channels, out_channels, False) - - self.bias = Parameter(torch.Tensor(out_channels)) - - self.reset_parameters() - - def reset_parameters(self): - glorot(self.att_l) - glorot(self.att_r) - glorot(self.lin1.weight) - glorot(self.lin2.weight) - zeros(self.bias) - - def forward(self, x: Tensor, edge_index: Adj, edge_attr: Tensor) -> Tensor: - # propagate_type: (x: Tensor, edge_attr: Tensor) - out = self.propagate(edge_index, x=x, edge_attr=edge_attr, size=None) - out = out + self.bias - return out - - def message(self, x_j: Tensor, x_i: Tensor, edge_attr: Tensor, - index: Tensor, ptr: OptTensor, - size_i: Optional[int]) -> Tensor: - - x_j = F.leaky_relu_(self.lin1(torch.cat([x_j, edge_attr], dim=-1))) - alpha_j = (x_j * self.att_l).sum(dim=-1) - alpha_i = (x_i * self.att_r).sum(dim=-1) - alpha = alpha_j + alpha_i - alpha = F.leaky_relu_(alpha) - alpha = softmax(alpha, index, ptr, size_i) - alpha = F.dropout(alpha, p=self.dropout, training=self.training) - return self.lin2(x_j) * alpha.unsqueeze(-1) - - -class AttentiveFP(torch.nn.Module): - r"""The Attentive FP model for molecular representation learning from the - `"Pushing the Boundaries of Molecular Representation for Drug Discovery - with the Graph Attention Mechanism" - `_ paper, based on - graph attention mechanisms. - - Args: - in_channels (int): Size of each input sample. - hidden_channels (int): Hidden node feature dimensionality. - out_channels (int): Size of each output sample. - edge_dim (int): Edge feature dimensionality. - num_layers (int): Number of GNN layers. - num_timesteps (int): Number of iterative refinement steps for global - readout. - dropout (float, optional): Dropout probability. 
(default: :obj:`0.0`) - - """ - def __init__( - self, - in_channels: int, - hidden_channels: int, - out_channels: int, - edge_dim: int, - num_layers: int, - num_timesteps: int, - dropout: float = 0.0, - ): - super().__init__() - - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.edge_dim = edge_dim - self.num_layers = num_layers - self.num_timesteps = num_timesteps - self.dropout = dropout - - self.lin1 = Linear(in_channels, hidden_channels) - - self.gate_conv = GATEConv(hidden_channels, hidden_channels, edge_dim, - dropout) - self.gru = GRUCell(hidden_channels, hidden_channels) - - self.atom_convs = torch.nn.ModuleList() - self.atom_grus = torch.nn.ModuleList() - for _ in range(num_layers - 1): - conv = GATConv(hidden_channels, hidden_channels, dropout=dropout, - add_self_loops=False, negative_slope=0.01) - self.atom_convs.append(conv) - self.atom_grus.append(GRUCell(hidden_channels, hidden_channels)) - - self.mol_conv = GATConv(hidden_channels, hidden_channels, - dropout=dropout, add_self_loops=False, - negative_slope=0.01) - self.mol_gru = GRUCell(hidden_channels, hidden_channels) - - self.lin2 = Linear(hidden_channels, out_channels) - - self.reset_parameters() - - def reset_parameters(self): - r"""Resets all learnable parameters of the module.""" - self.lin1.reset_parameters() - self.gate_conv.reset_parameters() - self.gru.reset_parameters() - for conv, gru in zip(self.atom_convs, self.atom_grus): - conv.reset_parameters() - gru.reset_parameters() - self.mol_conv.reset_parameters() - self.mol_gru.reset_parameters() - self.lin2.reset_parameters() - - def forward(self, x: Tensor, edge_index: Tensor, edge_attr: Tensor, - batch: Tensor) -> Tensor: - """""" - # Atom Embedding: - x = F.leaky_relu_(self.lin1(x)) - - h = F.elu_(self.gate_conv(x, edge_index, edge_attr)) - h = F.dropout(h, p=self.dropout, training=self.training) - x = self.gru(h, x).relu_() - - for conv, gru in zip(self.atom_convs, self.atom_grus): - h = F.elu_(conv(x, edge_index)) - h = F.dropout(h, p=self.dropout, training=self.training) - x = gru(h, x).relu_() - - # Molecule Embedding: - row = torch.arange(batch.size(0), device=batch.device) - edge_index = torch.stack([row, batch], dim=0) - - out = global_add_pool(x, batch).relu_() - for t in range(self.num_timesteps): - h = F.elu_(self.mol_conv((x, out), edge_index)) - h = F.dropout(h, p=self.dropout, training=self.training) - out = self.mol_gru(h, out).relu_() - - # Predictor: - out = F.dropout(out, p=self.dropout, training=self.training) - return self.lin2(out) - - def jittable(self) -> 'AttentiveFP': - self.gate_conv = self.gate_conv.jittable() - self.atom_convs = torch.nn.ModuleList( - [conv.jittable() for conv in self.atom_convs]) - self.mol_conv = self.mol_conv.jittable() - return self - - def __repr__(self) -> str: - return (f'{self.__class__.__name__}(' - f'in_channels={self.in_channels}, ' - f'hidden_channels={self.hidden_channels}, ' - f'out_channels={self.out_channels}, ' - f'edge_dim={self.edge_dim}, ' - f'num_layers={self.num_layers}, ' - f'num_timesteps={self.num_timesteps}' - f')') diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/models/basic_gnn.py b/pytorch_geometric-2.3.1/torch_geometric/nn/models/basic_gnn.py deleted file mode 100644 index b40bcdc..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/models/basic_gnn.py +++ /dev/null @@ -1,601 +0,0 @@ -import copy -from typing import Any, Callable, Dict, List, Optional, Tuple, Union - -import torch -import torch.nn.functional as F 
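A quick smoke test of the `AttentiveFP` model deleted above, assuming a PyG build that still ships it; all sizes below are illustrative, not taken from the source:

```python
import torch
from torch_geometric.nn.models import AttentiveFP

model = AttentiveFP(in_channels=39, hidden_channels=64, out_channels=1,
                    edge_dim=10, num_layers=2, num_timesteps=2, dropout=0.2)

x = torch.randn(5, 39)                    # 5 atoms with 39 features each
edge_index = torch.tensor([[0, 1, 2, 3],  # 4 directed bonds
                           [1, 2, 3, 4]])
edge_attr = torch.randn(4, 10)            # bond features
batch = torch.zeros(5, dtype=torch.long)  # all atoms belong to molecule 0

out = model(x, edge_index, edge_attr, batch)
print(out.shape)  # torch.Size([1, 1]) -- one prediction per molecule
```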
-from torch import Tensor -from torch.nn import Linear, ModuleList -from tqdm import tqdm - -from torch_geometric.loader import NeighborLoader -from torch_geometric.nn.conv import ( - EdgeConv, - GATConv, - GATv2Conv, - GCNConv, - GINConv, - MessagePassing, - PNAConv, - SAGEConv, -) -from torch_geometric.nn.models import MLP -from torch_geometric.nn.models.jumping_knowledge import JumpingKnowledge -from torch_geometric.nn.resolver import ( - activation_resolver, - normalization_resolver, -) -from torch_geometric.typing import Adj, OptTensor -from torch_geometric.utils.trim_to_layer import TrimToLayer - - -class BasicGNN(torch.nn.Module): - r"""An abstract class for implementing basic GNN models. - - Args: - in_channels (int or tuple): Size of each input sample, or :obj:`-1` to - derive the size from the first input(s) to the forward method. - A tuple corresponds to the sizes of source and target - dimensionalities. - hidden_channels (int): Size of each hidden sample. - num_layers (int): Number of message passing layers. - out_channels (int, optional): If not set to :obj:`None`, will apply a - final linear transformation to convert hidden node embeddings to - output size :obj:`out_channels`. (default: :obj:`None`) - dropout (float, optional): Dropout probability. (default: :obj:`0.`) - act (str or Callable, optional): The non-linear activation function to - use. (default: :obj:`"relu"`) - act_first (bool, optional): If set to :obj:`True`, activation is - applied before normalization. (default: :obj:`False`) - act_kwargs (Dict[str, Any], optional): Arguments passed to the - respective activation function defined by :obj:`act`. - (default: :obj:`None`) - norm (str or Callable, optional): The normalization function to - use. (default: :obj:`None`) - norm_kwargs (Dict[str, Any], optional): Arguments passed to the - respective normalization function defined by :obj:`norm`. - (default: :obj:`None`) - jk (str, optional): The Jumping Knowledge mode. If specified, the model - will additionally apply a final linear transformation to transform - node embeddings to the expected output feature dimensionality. - (:obj:`None`, :obj:`"last"`, :obj:`"cat"`, :obj:`"max"`, - :obj:`"lstm"`). (default: :obj:`None`) - **kwargs (optional): Additional arguments of the underlying - :class:`torch_geometric.nn.conv.MessagePassing` layers. 
- """ - def __init__( - self, - in_channels: int, - hidden_channels: int, - num_layers: int, - out_channels: Optional[int] = None, - dropout: float = 0.0, - act: Union[str, Callable, None] = "relu", - act_first: bool = False, - act_kwargs: Optional[Dict[str, Any]] = None, - norm: Union[str, Callable, None] = None, - norm_kwargs: Optional[Dict[str, Any]] = None, - jk: Optional[str] = None, - **kwargs, - ): - super().__init__() - - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.num_layers = num_layers - - self.dropout = dropout - self.act = activation_resolver(act, **(act_kwargs or {})) - self.jk_mode = jk - self.act_first = act_first - self.norm = norm if isinstance(norm, str) else None - self.norm_kwargs = norm_kwargs - - if out_channels is not None: - self.out_channels = out_channels - else: - self.out_channels = hidden_channels - - self.convs = ModuleList() - if num_layers > 1: - self.convs.append( - self.init_conv(in_channels, hidden_channels, **kwargs)) - if isinstance(in_channels, (tuple, list)): - in_channels = (hidden_channels, hidden_channels) - else: - in_channels = hidden_channels - for _ in range(num_layers - 2): - self.convs.append( - self.init_conv(in_channels, hidden_channels, **kwargs)) - if isinstance(in_channels, (tuple, list)): - in_channels = (hidden_channels, hidden_channels) - else: - in_channels = hidden_channels - if out_channels is not None and jk is None: - self._is_conv_to_out = True - self.convs.append( - self.init_conv(in_channels, out_channels, **kwargs)) - else: - self.convs.append( - self.init_conv(in_channels, hidden_channels, **kwargs)) - - self.norms = None - if norm is not None: - norm_layer = normalization_resolver( - norm, - hidden_channels, - **(norm_kwargs or {}), - ) - self.norms = ModuleList() - for _ in range(num_layers - 1): - self.norms.append(copy.deepcopy(norm_layer)) - if jk is not None: - self.norms.append(copy.deepcopy(norm_layer)) - - if jk is not None and jk != 'last': - self.jk = JumpingKnowledge(jk, hidden_channels, num_layers) - - if jk is not None: - if jk == 'cat': - in_channels = num_layers * hidden_channels - else: - in_channels = hidden_channels - self.lin = Linear(in_channels, self.out_channels) - - # We define `trim_to_layer` functionality as a module such that we can - # still use `to_hetero` on-top. - self._trim = TrimToLayer() - - def init_conv(self, in_channels: Union[int, Tuple[int, int]], - out_channels: int, **kwargs) -> MessagePassing: - raise NotImplementedError - - def reset_parameters(self): - r"""Resets all learnable parameters of the module.""" - for conv in self.convs: - conv.reset_parameters() - for norm in self.norms or []: - norm.reset_parameters() - if hasattr(self, 'jk'): - self.jk.reset_parameters() - if hasattr(self, 'lin'): - self.lin.reset_parameters() - - def forward( - self, - x: Tensor, - edge_index: Adj, - *, - edge_weight: OptTensor = None, - edge_attr: OptTensor = None, - num_sampled_nodes_per_hop: Optional[List[int]] = None, - num_sampled_edges_per_hop: Optional[List[int]] = None, - ) -> Tensor: - r""" - Args: - x (torch.Tensor): The input node features. - edge_index (torch.Tensor): The edge indices. - edge_weight (torch.Tensor, optional): The edge weights (if - supported by the underlying GNN layer). (default: :obj:`None`) - edge_attr (torch.Tensor, optional): The edge features (if supported - by the underlying GNN layer). (default: :obj:`None`) - num_sampled_nodes_per_hop (List[int], optional): The number of - sampled nodes per hop. 
-                Useful in :class:`~torch_geometric.loader.NeighborLoader`
-                scenarios to only operate on minimal-sized representations.
-                (default: :obj:`None`)
-            num_sampled_edges_per_hop (List[int], optional): The number of
-                sampled edges per hop.
-                Useful in :class:`~torch_geometric.loader.NeighborLoader`
-                scenarios to only operate on minimal-sized representations.
-                (default: :obj:`None`)
-        """
-        if (num_sampled_nodes_per_hop is not None
-                and isinstance(edge_weight, Tensor)
-                and isinstance(edge_attr, Tensor)):
-            raise NotImplementedError("'trim_to_layer' functionality does not "
-                                      "yet support trimming of both "
-                                      "'edge_weight' and 'edge_attr'")
-
-        xs: List[Tensor] = []
-        for i in range(self.num_layers):
-            if num_sampled_nodes_per_hop is not None:
-                x, edge_index, value = self._trim(
-                    i,
-                    num_sampled_nodes_per_hop,
-                    num_sampled_edges_per_hop,
-                    x,
-                    edge_index,
-                    edge_weight if edge_weight is not None else edge_attr,
-                )
-                if edge_weight is not None:
-                    edge_weight = value
-                else:
-                    edge_attr = value
-
-            # Tracing the module is not allowed with *args and **kwargs :(
-            # As such, we rely on a static solution to pass optional edge
-            # weights and edge attributes to the module.
-            if self.supports_edge_weight and self.supports_edge_attr:
-                x = self.convs[i](x, edge_index, edge_weight=edge_weight,
-                                  edge_attr=edge_attr)
-            elif self.supports_edge_weight:
-                x = self.convs[i](x, edge_index, edge_weight=edge_weight)
-            elif self.supports_edge_attr:
-                x = self.convs[i](x, edge_index, edge_attr=edge_attr)
-            else:
-                x = self.convs[i](x, edge_index)
-            if i == self.num_layers - 1 and self.jk_mode is None:
-                break
-            if self.act is not None and self.act_first:
-                x = self.act(x)
-            if self.norms is not None:
-                x = self.norms[i](x)
-            if self.act is not None and not self.act_first:
-                x = self.act(x)
-            x = F.dropout(x, p=self.dropout, training=self.training)
-            if hasattr(self, 'jk'):
-                xs.append(x)
-
-        x = self.jk(xs) if hasattr(self, 'jk') else x
-        x = self.lin(x) if hasattr(self, 'lin') else x
-        return x
-
-    @torch.no_grad()
-    def inference(self, loader: NeighborLoader,
-                  device: Optional[torch.device] = None,
-                  progress_bar: bool = False) -> Tensor:
-        r"""Performs layer-wise inference on large graphs using a
-        :class:`~torch_geometric.loader.NeighborLoader`, where the
-        :class:`~torch_geometric.loader.NeighborLoader` should sample the
-        full neighborhood for only one layer.
-        This is an efficient way to compute the output embeddings for all
-        nodes in the graph.
-        Only applicable in case :obj:`jk=None` or :obj:`jk='last'`.
- """ - assert self.jk_mode is None or self.jk_mode == 'last' - assert isinstance(loader, NeighborLoader) - assert len(loader.dataset) == loader.data.num_nodes - assert len(loader.node_sampler.num_neighbors) == 1 - assert not self.training - # assert not loader.shuffle # TODO (matthias) does not work :( - if progress_bar: - pbar = tqdm(total=len(self.convs) * len(loader)) - pbar.set_description('Inference') - - x_all = loader.data.x.cpu() - loader.data.n_id = torch.arange(x_all.size(0)) - - for i in range(self.num_layers): - xs: List[Tensor] = [] - for batch in loader: - x = x_all[batch.n_id].to(device) - if hasattr(batch, 'adj_t'): - edge_index = batch.adj_t.to(device) - else: - edge_index = batch.edge_index.to(device) - x = self.convs[i](x, edge_index)[:batch.batch_size] - if i == self.num_layers - 1 and self.jk_mode is None: - xs.append(x.cpu()) - if progress_bar: - pbar.update(1) - continue - if self.act is not None and self.act_first: - x = self.act(x) - if self.norms is not None: - x = self.norms[i](x) - if self.act is not None and not self.act_first: - x = self.act(x) - if i == self.num_layers - 1 and hasattr(self, 'lin'): - x = self.lin(x) - xs.append(x.cpu()) - if progress_bar: - pbar.update(1) - x_all = torch.cat(xs, dim=0) - if progress_bar: - pbar.close() - del loader.data.n_id - - return x_all - - def __repr__(self) -> str: - return (f'{self.__class__.__name__}({self.in_channels}, ' - f'{self.out_channels}, num_layers={self.num_layers})') - - -class GCN(BasicGNN): - r"""The Graph Neural Network from the `"Semi-supervised - Classification with Graph Convolutional Networks" - `_ paper, using the - :class:`~torch_geometric.nn.conv.GCNConv` operator for message passing. - - Args: - in_channels (int): Size of each input sample, or :obj:`-1` to derive - the size from the first input(s) to the forward method. - hidden_channels (int): Size of each hidden sample. - num_layers (int): Number of message passing layers. - out_channels (int, optional): If not set to :obj:`None`, will apply a - final linear transformation to convert hidden node embeddings to - output size :obj:`out_channels`. (default: :obj:`None`) - dropout (float, optional): Dropout probability. (default: :obj:`0.`) - act (str or Callable, optional): The non-linear activation function to - use. (default: :obj:`"relu"`) - act_first (bool, optional): If set to :obj:`True`, activation is - applied before normalization. (default: :obj:`False`) - act_kwargs (Dict[str, Any], optional): Arguments passed to the - respective activation function defined by :obj:`act`. - (default: :obj:`None`) - norm (str or Callable, optional): The normalization function to - use. (default: :obj:`None`) - norm_kwargs (Dict[str, Any], optional): Arguments passed to the - respective normalization function defined by :obj:`norm`. - (default: :obj:`None`) - jk (str, optional): The Jumping Knowledge mode. If specified, the model - will additionally apply a final linear transformation to transform - node embeddings to the expected output feature dimensionality. - (:obj:`None`, :obj:`"last"`, :obj:`"cat"`, :obj:`"max"`, - :obj:`"lstm"`). (default: :obj:`None`) - **kwargs (optional): Additional arguments of - :class:`torch_geometric.nn.conv.GCNConv`. 
- """ - supports_edge_weight = True - supports_edge_attr = False - - def init_conv(self, in_channels: int, out_channels: int, - **kwargs) -> MessagePassing: - return GCNConv(in_channels, out_channels, **kwargs) - - -class GraphSAGE(BasicGNN): - r"""The Graph Neural Network from the `"Inductive Representation Learning - on Large Graphs" `_ paper, using the - :class:`~torch_geometric.nn.SAGEConv` operator for message passing. - - Args: - in_channels (int or tuple): Size of each input sample, or :obj:`-1` to - derive the size from the first input(s) to the forward method. - A tuple corresponds to the sizes of source and target - dimensionalities. - hidden_channels (int): Size of each hidden sample. - num_layers (int): Number of message passing layers. - out_channels (int, optional): If not set to :obj:`None`, will apply a - final linear transformation to convert hidden node embeddings to - output size :obj:`out_channels`. (default: :obj:`None`) - dropout (float, optional): Dropout probability. (default: :obj:`0.`) - act (str or Callable, optional): The non-linear activation function to - use. (default: :obj:`"relu"`) - act_first (bool, optional): If set to :obj:`True`, activation is - applied before normalization. (default: :obj:`False`) - act_kwargs (Dict[str, Any], optional): Arguments passed to the - respective activation function defined by :obj:`act`. - (default: :obj:`None`) - norm (str or Callable, optional): The normalization function to - use. (default: :obj:`None`) - norm_kwargs (Dict[str, Any], optional): Arguments passed to the - respective normalization function defined by :obj:`norm`. - (default: :obj:`None`) - jk (str, optional): The Jumping Knowledge mode. If specified, the model - will additionally apply a final linear transformation to transform - node embeddings to the expected output feature dimensionality. - (:obj:`None`, :obj:`"last"`, :obj:`"cat"`, :obj:`"max"`, - :obj:`"lstm"`). (default: :obj:`None`) - **kwargs (optional): Additional arguments of - :class:`torch_geometric.nn.conv.SAGEConv`. - """ - supports_edge_weight = False - supports_edge_attr = False - - def init_conv(self, in_channels: Union[int, Tuple[int, int]], - out_channels: int, **kwargs) -> MessagePassing: - return SAGEConv(in_channels, out_channels, **kwargs) - - -class GIN(BasicGNN): - r"""The Graph Neural Network from the `"How Powerful are Graph Neural - Networks?" `_ paper, using the - :class:`~torch_geometric.nn.GINConv` operator for message passing. - - Args: - in_channels (int): Size of each input sample. - hidden_channels (int): Size of each hidden sample. - num_layers (int): Number of message passing layers. - out_channels (int, optional): If not set to :obj:`None`, will apply a - final linear transformation to convert hidden node embeddings to - output size :obj:`out_channels`. (default: :obj:`None`) - dropout (float, optional): Dropout probability. (default: :obj:`0.`) - act (str or Callable, optional): The non-linear activation function to - use. (default: :obj:`"relu"`) - act_first (bool, optional): If set to :obj:`True`, activation is - applied before normalization. (default: :obj:`False`) - act_kwargs (Dict[str, Any], optional): Arguments passed to the - respective activation function defined by :obj:`act`. - (default: :obj:`None`) - norm (str or Callable, optional): The normalization function to - use. (default: :obj:`None`) - norm_kwargs (Dict[str, Any], optional): Arguments passed to the - respective normalization function defined by :obj:`norm`. 
- (default: :obj:`None`) - jk (str, optional): The Jumping Knowledge mode. If specified, the model - will additionally apply a final linear transformation to transform - node embeddings to the expected output feature dimensionality. - (:obj:`None`, :obj:`"last"`, :obj:`"cat"`, :obj:`"max"`, - :obj:`"lstm"`). (default: :obj:`None`) - **kwargs (optional): Additional arguments of - :class:`torch_geometric.nn.conv.GINConv`. - """ - supports_edge_weight = False - supports_edge_attr = False - - def init_conv(self, in_channels: int, out_channels: int, - **kwargs) -> MessagePassing: - mlp = MLP( - [in_channels, out_channels, out_channels], - act=self.act, - act_first=self.act_first, - norm=self.norm, - norm_kwargs=self.norm_kwargs, - ) - return GINConv(mlp, **kwargs) - - -class GAT(BasicGNN): - r"""The Graph Neural Network from `"Graph Attention Networks" - `_ or `"How Attentive are Graph Attention - Networks?" `_ papers, using the - :class:`~torch_geometric.nn.GATConv` or - :class:`~torch_geometric.nn.GATv2Conv` operator for message passing, - respectively. - - Args: - in_channels (int or tuple): Size of each input sample, or :obj:`-1` to - derive the size from the first input(s) to the forward method. - A tuple corresponds to the sizes of source and target - dimensionalities. - hidden_channels (int): Size of each hidden sample. - num_layers (int): Number of message passing layers. - out_channels (int, optional): If not set to :obj:`None`, will apply a - final linear transformation to convert hidden node embeddings to - output size :obj:`out_channels`. (default: :obj:`None`) - v2 (bool, optional): If set to :obj:`True`, will make use of - :class:`~torch_geometric.nn.conv.GATv2Conv` rather than - :class:`~torch_geometric.nn.conv.GATConv`. (default: :obj:`False`) - dropout (float, optional): Dropout probability. (default: :obj:`0.`) - act (str or Callable, optional): The non-linear activation function to - use. (default: :obj:`"relu"`) - act_first (bool, optional): If set to :obj:`True`, activation is - applied before normalization. (default: :obj:`False`) - act_kwargs (Dict[str, Any], optional): Arguments passed to the - respective activation function defined by :obj:`act`. - (default: :obj:`None`) - norm (str or Callable, optional): The normalization function to - use. (default: :obj:`None`) - norm_kwargs (Dict[str, Any], optional): Arguments passed to the - respective normalization function defined by :obj:`norm`. - (default: :obj:`None`) - jk (str, optional): The Jumping Knowledge mode. If specified, the model - will additionally apply a final linear transformation to transform - node embeddings to the expected output feature dimensionality. - (:obj:`None`, :obj:`"last"`, :obj:`"cat"`, :obj:`"max"`, - :obj:`"lstm"`). (default: :obj:`None`) - **kwargs (optional): Additional arguments of - :class:`torch_geometric.nn.conv.GATConv` or - :class:`torch_geometric.nn.conv.GATv2Conv`. 
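As the `init_conv` implementation below shows, `GAT` divides `out_channels` by `heads` before constructing each `GATConv` when concatenation is enabled, which is why the hidden size must be divisible by the number of heads. A small sketch of that arithmetic against `GATConv` directly (toy sizes):

```python
import torch
from torch_geometric.nn import GATConv

hidden_channels, heads = 64, 4
per_head = hidden_channels // heads  # what init_conv passes down: 16

conv = GATConv(in_channels=32, out_channels=per_head, heads=heads,
               concat=True)
x = torch.randn(10, 32)
edge_index = torch.tensor([[0, 1, 2], [1, 2, 0]])

out = conv(x, edge_index)
print(out.shape)  # torch.Size([10, 64]): 4 heads * 16 channels each

# With hidden_channels=64 and heads=3, 64 % 3 != 0, so the GAT wrapper
# below raises a ValueError instead of silently truncating channels.
```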
- """ - supports_edge_weight = False - supports_edge_attr = True - - def init_conv(self, in_channels: Union[int, Tuple[int, int]], - out_channels: int, **kwargs) -> MessagePassing: - - v2 = kwargs.pop('v2', False) - heads = kwargs.pop('heads', 1) - concat = kwargs.pop('concat', True) - - # Do not use concatenation in case the layer `GATConv` layer maps to - # the desired output channels (out_channels != None and jk != None): - if getattr(self, '_is_conv_to_out', False): - concat = False - - if concat and out_channels % heads != 0: - raise ValueError(f"Ensure that the number of output channels of " - f"'GATConv' (got '{out_channels}') is divisible " - f"by the number of heads (got '{heads}')") - - if concat: - out_channels = out_channels // heads - - Conv = GATConv if not v2 else GATv2Conv - return Conv(in_channels, out_channels, heads=heads, concat=concat, - dropout=self.dropout, **kwargs) - - -class PNA(BasicGNN): - r"""The Graph Neural Network from the `"Principal Neighbourhood Aggregation - for Graph Nets" `_ paper, using the - :class:`~torch_geometric.nn.conv.PNAConv` operator for message passing. - - Args: - in_channels (int): Size of each input sample, or :obj:`-1` to derive - the size from the first input(s) to the forward method. - hidden_channels (int): Size of each hidden sample. - num_layers (int): Number of message passing layers. - out_channels (int, optional): If not set to :obj:`None`, will apply a - final linear transformation to convert hidden node embeddings to - output size :obj:`out_channels`. (default: :obj:`None`) - dropout (float, optional): Dropout probability. (default: :obj:`0.`) - act (str or Callable, optional): The non-linear activation function to - use. (default: :obj:`"relu"`) - act_first (bool, optional): If set to :obj:`True`, activation is - applied before normalization. (default: :obj:`False`) - act_kwargs (Dict[str, Any], optional): Arguments passed to the - respective activation function defined by :obj:`act`. - (default: :obj:`None`) - norm (str or Callable, optional): The normalization function to - use. (default: :obj:`None`) - norm_kwargs (Dict[str, Any], optional): Arguments passed to the - respective normalization function defined by :obj:`norm`. - (default: :obj:`None`) - jk (str, optional): The Jumping Knowledge mode. If specified, the model - will additionally apply a final linear transformation to transform - node embeddings to the expected output feature dimensionality. - (:obj:`None`, :obj:`"last"`, :obj:`"cat"`, :obj:`"max"`, - :obj:`"lstm"`). (default: :obj:`None`) - **kwargs (optional): Additional arguments of - :class:`torch_geometric.nn.conv.PNAConv`. - """ - supports_edge_weight = False - supports_edge_attr = True - - def init_conv(self, in_channels: int, out_channels: int, - **kwargs) -> MessagePassing: - return PNAConv(in_channels, out_channels, **kwargs) - - -class EdgeCNN(BasicGNN): - r"""The Graph Neural Network from the `"Dynamic Graph CNN for Learning on - Point Clouds" `_ paper, using the - :class:`~torch_geometric.nn.conv.EdgeConv` operator for message passing. - - Args: - in_channels (int): Size of each input sample. - hidden_channels (int): Size of each hidden sample. - num_layers (int): Number of message passing layers. - out_channels (int, optional): If not set to :obj:`None`, will apply a - final linear transformation to convert hidden node embeddings to - output size :obj:`out_channels`. (default: :obj:`None`) - dropout (float, optional): Dropout probability. 
(default: :obj:`0.`) - act (str or Callable, optional): The non-linear activation function to - use. (default: :obj:`"relu"`) - act_first (bool, optional): If set to :obj:`True`, activation is - applied before normalization. (default: :obj:`False`) - act_kwargs (Dict[str, Any], optional): Arguments passed to the - respective activation function defined by :obj:`act`. - (default: :obj:`None`) - norm (str or Callable, optional): The normalization function to - use. (default: :obj:`None`) - norm_kwargs (Dict[str, Any], optional): Arguments passed to the - respective normalization function defined by :obj:`norm`. - (default: :obj:`None`) - jk (str, optional): The Jumping Knowledge mode. If specified, the model - will additionally apply a final linear transformation to transform - node embeddings to the expected output feature dimensionality. - (:obj:`None`, :obj:`"last"`, :obj:`"cat"`, :obj:`"max"`, - :obj:`"lstm"`). (default: :obj:`None`) - **kwargs (optional): Additional arguments of - :class:`torch_geometric.nn.conv.EdgeConv`. - """ - supports_edge_weight = False - supports_edge_attr = False - - def init_conv(self, in_channels: int, out_channels: int, - **kwargs) -> MessagePassing: - mlp = MLP( - [2 * in_channels, out_channels, out_channels], - act=self.act, - act_first=self.act_first, - norm=self.norm, - norm_kwargs=self.norm_kwargs, - ) - return EdgeConv(mlp, **kwargs) - - -__all__ = ['GCN', 'GraphSAGE', 'GIN', 'GAT', 'PNA', 'EdgeCNN'] diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/models/lightgcn.py b/pytorch_geometric-2.3.1/torch_geometric/nn/models/lightgcn.py deleted file mode 100644 index f174a4b..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/models/lightgcn.py +++ /dev/null @@ -1,263 +0,0 @@ -from typing import Optional, Union - -import torch -import torch.nn.functional as F -from torch import Tensor -from torch.nn import Embedding, ModuleList -from torch.nn.modules.loss import _Loss - -from torch_geometric.nn.conv import LGConv -from torch_geometric.typing import Adj, OptTensor, SparseTensor - - -class LightGCN(torch.nn.Module): - r"""The LightGCN model from the `"LightGCN: Simplifying and Powering - Graph Convolution Network for Recommendation" - `_ paper. - - :class:`~torch_geometric.nn.models.LightGCN` learns embeddings by linearly - propagating them on the underlying graph, and uses the weighted sum of the - embeddings learned at all layers as the final embedding - - .. math:: - \textbf{x}_i = \sum_{l=0}^{L} \alpha_l \textbf{x}^{(l)}_i, - - where each layer's embedding is computed as - - .. math:: - \mathbf{x}^{(l+1)}_i = \sum_{j \in \mathcal{N}(i)} - \frac{1}{\sqrt{\deg(i)\deg(j)}}\mathbf{x}^{(l)}_j. - - Two prediction heads and training objectives are provided: - **link prediction** (via - :meth:`~torch_geometric.nn.models.LightGCN.link_pred_loss` and - :meth:`~torch_geometric.nn.models.LightGCN.predict_link`) and - **recommendation** (via - :meth:`~torch_geometric.nn.models.LightGCN.recommendation_loss` and - :meth:`~torch_geometric.nn.models.LightGCN.recommend`). - - .. note:: - - Embeddings are propagated according to the graph connectivity specified - by :obj:`edge_index` while rankings or link probabilities are computed - according to the edges specified by :obj:`edge_label_index`. - - Args: - num_nodes (int): The number of nodes in the graph. - embedding_dim (int): The dimensionality of node embeddings. - num_layers (int): The number of - :class:`~torch_geometric.nn.conv.LGConv` layers. 
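The weighted-sum propagation described in the `LightGCN` docstring above fits in a dozen lines of dense PyTorch. A toy-sized sketch of the math behind `get_embedding` (the dense adjacency is illustrative only; the real model uses sparse `LGConv` layers):

```python
import torch

# x^{(l+1)}_i = sum_{j in N(i)} x^{(l)}_j / sqrt(deg(i) * deg(j));
# final embedding = sum_l alpha_l * x^{(l)}.
num_nodes, dim, num_layers = 4, 8, 2
edge_index = torch.tensor([[0, 1, 1, 2, 2, 3],
                           [1, 0, 2, 1, 3, 2]])

adj = torch.zeros(num_nodes, num_nodes)
adj[edge_index[0], edge_index[1]] = 1.0
deg = adj.sum(dim=1)
norm = deg.rsqrt()
adj_norm = norm.view(-1, 1) * adj * norm.view(1, -1)  # D^-1/2 A D^-1/2

x = torch.randn(num_nodes, dim)  # layer-0 embeddings
alpha = torch.full((num_layers + 1,), 1.0 / (num_layers + 1))

out = alpha[0] * x
for layer in range(num_layers):
    x = adj_norm @ x             # one parameter-free LGConv step
    out = out + alpha[layer + 1] * x
print(out.shape)  # torch.Size([4, 8])
```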
- alpha (float or torch.Tensor, optional): The scalar or vector - specifying the re-weighting coefficients for aggregating the final - embedding. If set to :obj:`None`, the uniform initialization of - :obj:`1 / (num_layers + 1)` is used. (default: :obj:`None`) - **kwargs (optional): Additional arguments of the underlying - :class:`~torch_geometric.nn.conv.LGConv` layers. - """ - def __init__( - self, - num_nodes: int, - embedding_dim: int, - num_layers: int, - alpha: Optional[Union[float, Tensor]] = None, - **kwargs, - ): - super().__init__() - - self.num_nodes = num_nodes - self.embedding_dim = embedding_dim - self.num_layers = num_layers - - if alpha is None: - alpha = 1. / (num_layers + 1) - - if isinstance(alpha, Tensor): - assert alpha.size(0) == num_layers + 1 - else: - alpha = torch.tensor([alpha] * (num_layers + 1)) - self.register_buffer('alpha', alpha) - - self.embedding = Embedding(num_nodes, embedding_dim) - self.convs = ModuleList([LGConv(**kwargs) for _ in range(num_layers)]) - - self.reset_parameters() - - def reset_parameters(self): - r"""Resets all learnable parameters of the module.""" - torch.nn.init.xavier_uniform_(self.embedding.weight) - for conv in self.convs: - conv.reset_parameters() - - def get_embedding(self, edge_index: Adj) -> Tensor: - r"""Returns the embedding of nodes in the graph.""" - x = self.embedding.weight - out = x * self.alpha[0] - - for i in range(self.num_layers): - x = self.convs[i](x, edge_index) - out = out + x * self.alpha[i + 1] - - return out - - def forward(self, edge_index: Adj, - edge_label_index: OptTensor = None) -> Tensor: - r"""Computes rankings for pairs of nodes. - - Args: - edge_index (torch.Tensor or SparseTensor): Edge tensor specifying - the connectivity of the graph. - edge_label_index (torch.Tensor, optional): Edge tensor specifying - the node pairs for which to compute rankings or probabilities. - If :obj:`edge_label_index` is set to :obj:`None`, all edges in - :obj:`edge_index` will be used instead. (default: :obj:`None`) - """ - if edge_label_index is None: - if isinstance(edge_index, SparseTensor): - edge_label_index = torch.stack(edge_index.coo()[:2], dim=0) - else: - edge_label_index = edge_index - - out = self.get_embedding(edge_index) - - out_src = out[edge_label_index[0]] - out_dst = out[edge_label_index[1]] - return (out_src * out_dst).sum(dim=-1) - - def predict_link(self, edge_index: Adj, edge_label_index: OptTensor = None, - prob: bool = False) -> Tensor: - r"""Predict links between nodes specified in :obj:`edge_label_index`. - - Args: - prob (bool, optional): Whether probabilities should be returned. - (default: :obj:`False`) - """ - pred = self(edge_index, edge_label_index).sigmoid() - return pred if prob else pred.round() - - def recommend(self, edge_index: Adj, src_index: OptTensor = None, - dst_index: OptTensor = None, k: int = 1) -> Tensor: - r"""Get top-:math:`k` recommendations for nodes in :obj:`src_index`. - - Args: - src_index (torch.Tensor, optional): Node indices for which - recommendations should be generated. - If set to :obj:`None`, all nodes will be used. - (default: :obj:`None`) - dst_index (torch.Tensor, optional): Node indices which represent - the possible recommendation choices. - If set to :obj:`None`, all nodes will be used. - (default: :obj:`None`) - k (int, optional): Number of recommendations. 
(default: :obj:`1`)
-        """
-        out_src = out_dst = self.get_embedding(edge_index)
-
-        if src_index is not None:
-            out_src = out_src[src_index]
-
-        if dst_index is not None:
-            out_dst = out_dst[dst_index]
-
-        pred = out_src @ out_dst.t()
-        top_index = pred.topk(k, dim=-1).indices
-
-        if dst_index is not None:  # Map local top-indices to original indices.
-            top_index = dst_index[top_index.view(-1)].view(*top_index.size())
-
-        return top_index
-
-    def link_pred_loss(self, pred: Tensor, edge_label: Tensor,
-                       **kwargs) -> Tensor:
-        r"""Computes the model loss for a link prediction objective via the
-        :class:`torch.nn.BCEWithLogitsLoss`.
-
-        Args:
-            pred (torch.Tensor): The predictions.
-            edge_label (torch.Tensor): The ground-truth edge labels.
-            **kwargs (optional): Additional arguments of the underlying
-                :class:`torch.nn.BCEWithLogitsLoss` loss function.
-        """
-        loss_fn = torch.nn.BCEWithLogitsLoss(**kwargs)
-        return loss_fn(pred, edge_label.to(pred.dtype))
-
-    def recommendation_loss(self, pos_edge_rank: Tensor, neg_edge_rank: Tensor,
-                            lambda_reg: float = 1e-4, **kwargs) -> Tensor:
-        r"""Computes the model loss for a ranking objective via the Bayesian
-        Personalized Ranking (BPR) loss.
-
-        .. note::
-
-            The i-th entry in the :obj:`pos_edge_rank` vector and the i-th
-            entry in the :obj:`neg_edge_rank` vector must correspond to ranks
-            of positive and negative edges of the same entity (*e.g.*, user).
-
-        Args:
-            pos_edge_rank (torch.Tensor): Positive edge rankings.
-            neg_edge_rank (torch.Tensor): Negative edge rankings.
-            lambda_reg (float, optional): The :math:`L_2` regularization
-                strength of the Bayesian Personalized Ranking (BPR) loss.
-                (default: :obj:`1e-4`)
-            **kwargs (optional): Additional arguments of the underlying
-                :class:`torch_geometric.nn.models.lightgcn.BPRLoss` loss
-                function.
-        """
-        loss_fn = BPRLoss(lambda_reg, **kwargs)
-        return loss_fn(pos_edge_rank, neg_edge_rank, self.embedding.weight)
-
-    def __repr__(self) -> str:
-        return (f'{self.__class__.__name__}({self.num_nodes}, '
-                f'{self.embedding_dim}, num_layers={self.num_layers})')
-
-
-class BPRLoss(_Loss):
-    r"""The Bayesian Personalized Ranking (BPR) loss.
-
-    The BPR loss is a pairwise loss that encourages the prediction of an
-    observed entry to be higher than its unobserved counterparts
-    (see `here `__).
-
-    .. math::
-        L_{\text{BPR}} = - \sum_{u=1}^{M} \sum_{i \in \mathcal{N}_u}
-        \sum_{j \not\in \mathcal{N}_u} \ln \sigma(\hat{y}_{ui} - \hat{y}_{uj})
-        + \lambda \vert\vert \textbf{x}^{(0)} \vert\vert^2
-
-    where :math:`\lambda` controls the :math:`L_2` regularization strength.
-    We compute the mean BPR loss for simplicity.
-
-    Args:
-        lambda_reg (float, optional): The :math:`L_2` regularization strength.
-            (default: :obj:`0`)
-        **kwargs (optional): Additional arguments of the underlying
-            :class:`torch.nn.modules.loss._Loss` class.
-    """
-    __constants__ = ['lambda_reg']
-    lambda_reg: float
-
-    def __init__(self, lambda_reg: float = 0, **kwargs):
-        super().__init__(None, None, "sum", **kwargs)
-        self.lambda_reg = lambda_reg
-
-    def forward(self, positives: Tensor, negatives: Tensor,
-                parameters: Optional[Tensor] = None) -> Tensor:
-        r"""Compute the mean Bayesian Personalized Ranking (BPR) loss.
-
-        .. note::
-
-            The i-th entry in the :obj:`positives` vector and the i-th entry
-            in the :obj:`negatives` vector should correspond to the same
-            entity (*e.g.*, user), as the BPR is a personalized ranking loss.
-
-        Args:
-            positives (Tensor): The vector of positive-pair rankings.
-            negatives (Tensor): The vector of negative-pair rankings.
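Numerically, the BPR loss above is just `-logsigmoid(pos - neg).mean()` plus an optional L2 term. A self-contained sketch on toy tensors, mirroring `BPRLoss.forward`:

```python
import torch
import torch.nn.functional as F

pos = torch.tensor([2.0, 0.5, 1.2])   # rankings of observed pairs
neg = torch.tensor([0.1, 0.7, -0.3])  # rankings of unobserved pairs
emb = torch.randn(4, 8)               # layer-0 embeddings to regularize
lambda_reg = 1e-4

n_pairs = pos.size(0)
log_prob = F.logsigmoid(pos - neg).mean()
reg = lambda_reg * emb.norm(p=2).pow(2)
loss = (-log_prob + reg) / n_pairs    # same arithmetic as BPRLoss.forward
print(loss.item())
```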
- parameters (Tensor, optional): The tensor of parameters which - should be used for :math:`L_2` regularization - (default: :obj:`None`). - """ - n_pairs = positives.size(0) - log_prob = F.logsigmoid(positives - negatives).mean() - regularization = 0 - - if self.lambda_reg != 0: - regularization = self.lambda_reg * parameters.norm(p=2).pow(2) - - return (-log_prob + regularization) / n_pairs diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/models/linkx.py b/pytorch_geometric-2.3.1/torch_geometric/nn/models/linkx.py deleted file mode 100644 index 5804f53..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/models/linkx.py +++ /dev/null @@ -1,227 +0,0 @@ -import math - -import torch -from torch import Tensor -from torch.nn import BatchNorm1d, Parameter - -from torch_geometric.nn import inits -from torch_geometric.nn.conv import MessagePassing -from torch_geometric.nn.models import MLP -from torch_geometric.typing import Adj, OptTensor, SparseTensor -from torch_geometric.utils import spmm - - -class SparseLinear(MessagePassing): - def __init__(self, in_channels: int, out_channels: int, bias: bool = True): - super().__init__(aggr='add') - self.in_channels = in_channels - self.out_channels = out_channels - - self.weight = Parameter(torch.Tensor(in_channels, out_channels)) - if bias: - self.bias = Parameter(torch.Tensor(out_channels)) - else: - self.register_parameter('bias', None) - - self.reset_parameters() - - def reset_parameters(self): - inits.kaiming_uniform(self.weight, fan=self.in_channels, - a=math.sqrt(5)) - inits.uniform(self.in_channels, self.bias) - - @torch.jit._overload_method - def forward(self, edge_index, edge_weight=None): - # type: (SparseTensor, OptTensor) -> Tensor - pass - - @torch.jit._overload_method - def forward(self, edge_index, edge_weight=None): - # type: (Tensor, OptTensor) -> Tensor - pass - - def forward( - self, - edge_index: Adj, - edge_weight: OptTensor = None, - ) -> Tensor: - # propagate_type: (weight: Tensor, edge_weight: OptTensor) - out = self.propagate(edge_index, weight=self.weight, - edge_weight=edge_weight, size=None) - - if self.bias is not None: - out = out + self.bias - - return out - - def message(self, weight_j: Tensor, edge_weight: OptTensor) -> Tensor: - if edge_weight is None: - return weight_j - else: - return edge_weight.view(-1, 1) * weight_j - - def message_and_aggregate(self, adj_t: SparseTensor, - weight: Tensor) -> Tensor: - return spmm(adj_t, weight, reduce=self.aggr) - - -class LINKX(torch.nn.Module): - r"""The LINKX model from the `"Large Scale Learning on Non-Homophilous - Graphs: New Benchmarks and Strong Simple Methods" - `_ paper - - .. math:: - \mathbf{H}_{\mathbf{A}} &= \textrm{MLP}_{\mathbf{A}}(\mathbf{A}) - - \mathbf{H}_{\mathbf{X}} &= \textrm{MLP}_{\mathbf{X}}(\mathbf{X}) - - \mathbf{Y} &= \textrm{MLP}_{f} \left( \sigma \left( \mathbf{W} - [\mathbf{H}_{\mathbf{A}}, \mathbf{H}_{\mathbf{X}}] + - \mathbf{H}_{\mathbf{A}} + \mathbf{H}_{\mathbf{X}} \right) \right) - - .. note:: - - For an example of using LINKX, see `examples/linkx.py `_. - - Args: - num_nodes (int): The number of nodes in the graph. - in_channels (int): Size of each input sample, or :obj:`-1` to derive - the size from the first input(s) to the forward method. - hidden_channels (int): Size of each hidden sample. - out_channels (int): Size of each output sample. - num_layers (int): Number of layers of :math:`\textrm{MLP}_{f}`. - num_edge_layers (int, optional): Number of layers of - :math:`\textrm{MLP}_{\mathbf{A}}`. 
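The LINKX update can be checked against a dense, toy-sized transcription of the equation in its docstring. The single `Linear` layers here stand in for the MLPs and are purely illustrative:

```python
import torch

# Y = MLP_f( sigma( W [H_A, H_X] + H_A + H_X ) ), with sigma = ReLU.
num_nodes, feat_dim, hidden = 6, 5, 8
A = torch.rand(num_nodes, num_nodes).round()  # toy dense adjacency
X = torch.randn(num_nodes, feat_dim)

mlp_A = torch.nn.Linear(num_nodes, hidden)    # H_A = MLP_A(A)
mlp_X = torch.nn.Linear(feat_dim, hidden)     # H_X = MLP_X(X)
W = torch.nn.Linear(2 * hidden, hidden)       # mixes [H_A, H_X]
mlp_f = torch.nn.Linear(hidden, 3)            # final predictor

H_A = mlp_A(A)
H_X = mlp_X(X)
Y = mlp_f(torch.relu(W(torch.cat([H_A, H_X], dim=-1)) + H_A + H_X))
print(Y.shape)  # torch.Size([6, 3])
```

In the actual module, the product :math:`\mathbf{W} [\mathbf{H}_{\mathbf{A}}, \mathbf{H}_{\mathbf{X}}]` is decomposed into `cat_lin1(H_A) + cat_lin2(H_X)`, which is algebraically the same as a single linear layer over the concatenation.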
(default: :obj:`1`) - num_node_layers (int, optional): Number of layers of - :math:`\textrm{MLP}_{\mathbf{X}}`. (default: :obj:`1`) - dropout (float, optional): Dropout probability of each hidden - embedding. (default: :obj:`0.0`) - """ - def __init__( - self, - num_nodes: int, - in_channels: int, - hidden_channels: int, - out_channels: int, - num_layers: int, - num_edge_layers: int = 1, - num_node_layers: int = 1, - dropout: float = 0.0, - ): - super().__init__() - - self.num_nodes = num_nodes - self.in_channels = in_channels - self.out_channels = out_channels - self.num_edge_layers = num_edge_layers - - self.edge_lin = SparseLinear(num_nodes, hidden_channels) - - if self.num_edge_layers > 1: - self.edge_norm = BatchNorm1d(hidden_channels) - channels = [hidden_channels] * num_edge_layers - self.edge_mlp = MLP(channels, dropout=0., act_first=True) - else: - self.edge_norm = None - self.edge_mlp = None - - channels = [in_channels] + [hidden_channels] * num_node_layers - self.node_mlp = MLP(channels, dropout=0., act_first=True) - - self.cat_lin1 = torch.nn.Linear(hidden_channels, hidden_channels) - self.cat_lin2 = torch.nn.Linear(hidden_channels, hidden_channels) - - channels = [hidden_channels] * num_layers + [out_channels] - self.final_mlp = MLP(channels, dropout=dropout, act_first=True) - - self.reset_parameters() - - def reset_parameters(self): - r"""Resets all learnable parameters of the module.""" - self.edge_lin.reset_parameters() - if self.edge_norm is not None: - self.edge_norm.reset_parameters() - if self.edge_mlp is not None: - self.edge_mlp.reset_parameters() - self.node_mlp.reset_parameters() - self.cat_lin1.reset_parameters() - self.cat_lin2.reset_parameters() - self.final_mlp.reset_parameters() - - @torch.jit._overload_method - def forward(self, x, edge_index, edge_weight=None): - # type: (OptTensor, SparseTensor, OptTensor) -> Tensor - pass - - @torch.jit._overload_method - def forward(self, x, edge_index, edge_weight=None): - # type: (OptTensor, Tensor, OptTensor) -> Tensor - pass - - def forward( - self, - x: OptTensor, - edge_index: Adj, - edge_weight: OptTensor = None, - ) -> Tensor: - """""" - out = self.edge_lin(edge_index, edge_weight) - - if self.edge_norm is not None and self.edge_mlp is not None: - out = out.relu_() - out = self.edge_norm(out) - out = self.edge_mlp(out) - - out = out + self.cat_lin1(out) - - if x is not None: - x = self.node_mlp(x) - out = out + x - out = out + self.cat_lin2(x) - - return self.final_mlp(out.relu_()) - - def jittable(self, typing: str) -> torch.nn.Module: # pragma: no cover - edge_index_type = typing.split(',')[1].strip() - - class EdgeIndexJittable(torch.nn.Module): - def __init__(self, child): - super().__init__() - self.child = child - - def reset_parameters(self): - self.child.reset_parameters() - - def forward(self, x: Tensor, edge_index: Tensor, - edge_weight: OptTensor = None) -> Tensor: - return self.child(x, edge_index, edge_weight) - - class SparseTensorJittable(torch.nn.Module): - def __init__(self, child): - super().__init__() - self.child = child - - def reset_parameters(self): - self.child.reset_parameters() - - def forward(self, x: Tensor, edge_index: SparseTensor, - edge_weight: OptTensor = None): - return self.child(x, edge_index, edge_weight) - - if self.edge_lin.jittable is not None: - self.edge_lin = self.edge_lin.jittable() - - if 'Tensor' == edge_index_type: - jittable_module = EdgeIndexJittable(self) - elif 'SparseTensor' == edge_index_type: - jittable_module = SparseTensorJittable(self) - else: - raise 
ValueError(f"Could not parse types '{typing}'") - - return jittable_module - - def __repr__(self) -> str: - return (f'{self.__class__.__name__}(num_nodes={self.num_nodes}, ' - f'in_channels={self.in_channels}, ' - f'out_channels={self.out_channels})') diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/models/mlp.py b/pytorch_geometric-2.3.1/torch_geometric/nn/models/mlp.py deleted file mode 100644 index 402a6c0..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/models/mlp.py +++ /dev/null @@ -1,217 +0,0 @@ -import warnings -from typing import Any, Callable, Dict, List, Optional, Union - -import torch -import torch.nn.functional as F -from torch import Tensor -from torch.nn import Identity - -from torch_geometric.nn.dense.linear import Linear -from torch_geometric.nn.resolver import ( - activation_resolver, - normalization_resolver, -) -from torch_geometric.typing import NoneType - - -class MLP(torch.nn.Module): - r"""A Multi-Layer Perception (MLP) model. - There exists two ways to instantiate an :class:`MLP`: - - 1. By specifying explicit channel sizes, *e.g.*, - - .. code-block:: python - - mlp = MLP([16, 32, 64, 128]) - - creates a three-layer MLP with **differently** sized hidden layers. - - 1. By specifying fixed hidden channel sizes over a number of layers, - *e.g.*, - - .. code-block:: python - - mlp = MLP(in_channels=16, hidden_channels=32, - out_channels=128, num_layers=3) - - creates a three-layer MLP with **equally** sized hidden layers. - - Args: - channel_list (List[int] or int, optional): List of input, intermediate - and output channels such that :obj:`len(channel_list) - 1` denotes - the number of layers of the MLP (default: :obj:`None`) - in_channels (int, optional): Size of each input sample. - Will override :attr:`channel_list`. (default: :obj:`None`) - hidden_channels (int, optional): Size of each hidden sample. - Will override :attr:`channel_list`. (default: :obj:`None`) - out_channels (int, optional): Size of each output sample. - Will override :attr:`channel_list`. (default: :obj:`None`) - num_layers (int, optional): The number of layers. - Will override :attr:`channel_list`. (default: :obj:`None`) - dropout (float or List[float], optional): Dropout probability of each - hidden embedding. If a list is provided, sets the dropout value per - layer. (default: :obj:`0.`) - act (str or Callable, optional): The non-linear activation function to - use. (default: :obj:`"relu"`) - act_first (bool, optional): If set to :obj:`True`, activation is - applied before normalization. (default: :obj:`False`) - act_kwargs (Dict[str, Any], optional): Arguments passed to the - respective activation function defined by :obj:`act`. - (default: :obj:`None`) - norm (str or Callable, optional): The normalization function to - use. (default: :obj:`"batch_norm"`) - norm_kwargs (Dict[str, Any], optional): Arguments passed to the - respective normalization function defined by :obj:`norm`. - (default: :obj:`None`) - plain_last (bool, optional): If set to :obj:`False`, will apply - non-linearity, batch normalization and dropout to the last layer as - well. (default: :obj:`True`) - bias (bool or List[bool], optional): If set to :obj:`False`, the module - will not learn additive biases. If a list is provided, sets the - bias per layer. (default: :obj:`True`) - **kwargs (optional): Additional deprecated arguments of the MLP layer. 
- """ - def __init__( - self, - channel_list: Optional[Union[List[int], int]] = None, - *, - in_channels: Optional[int] = None, - hidden_channels: Optional[int] = None, - out_channels: Optional[int] = None, - num_layers: Optional[int] = None, - dropout: Union[float, List[float]] = 0., - act: Union[str, Callable, None] = "relu", - act_first: bool = False, - act_kwargs: Optional[Dict[str, Any]] = None, - norm: Union[str, Callable, None] = "batch_norm", - norm_kwargs: Optional[Dict[str, Any]] = None, - plain_last: bool = True, - bias: Union[bool, List[bool]] = True, - **kwargs, - ): - super().__init__() - - # Backward compatibility: - act_first = act_first or kwargs.get("relu_first", False) - batch_norm = kwargs.get("batch_norm", None) - if batch_norm is not None and isinstance(batch_norm, bool): - warnings.warn("Argument `batch_norm` is deprecated, " - "please use `norm` to specify normalization layer.") - norm = 'batch_norm' if batch_norm else None - batch_norm_kwargs = kwargs.get("batch_norm_kwargs", None) - norm_kwargs = batch_norm_kwargs or {} - - if isinstance(channel_list, int): - in_channels = channel_list - - if in_channels is not None: - if num_layers is None: - raise ValueError("Argument `num_layers` must be given") - if num_layers > 1 and hidden_channels is None: - raise ValueError(f"Argument `hidden_channels` must be given " - f"for `num_layers={num_layers}`") - if out_channels is None: - raise ValueError("Argument `out_channels` must be given") - - channel_list = [hidden_channels] * (num_layers - 1) - channel_list = [in_channels] + channel_list + [out_channels] - - assert isinstance(channel_list, (tuple, list)) - assert len(channel_list) >= 2 - self.channel_list = channel_list - - self.act = activation_resolver(act, **(act_kwargs or {})) - self.act_first = act_first - self.plain_last = plain_last - - if isinstance(dropout, float): - dropout = [dropout] * (len(channel_list) - 1) - if plain_last: - dropout[-1] = 0. 
-        if len(dropout) != len(channel_list) - 1:
-            raise ValueError(
-                f"Number of dropout values provided ({len(dropout)}) does "
-                f"not match the number of layers specified "
-                f"({len(channel_list) - 1})")
-        self.dropout = dropout
-
-        if isinstance(bias, bool):
-            bias = [bias] * (len(channel_list) - 1)
-        if len(bias) != len(channel_list) - 1:
-            raise ValueError(
-                f"Number of bias values provided ({len(bias)}) does not match "
-                f"the number of layers specified ({len(channel_list) - 1})")
-
-        self.lins = torch.nn.ModuleList()
-        iterator = zip(channel_list[:-1], channel_list[1:], bias)
-        for in_channels, out_channels, _bias in iterator:
-            self.lins.append(Linear(in_channels, out_channels, bias=_bias))
-
-        self.norms = torch.nn.ModuleList()
-        iterator = channel_list[1:-1] if plain_last else channel_list[1:]
-        for hidden_channels in iterator:
-            if norm is not None:
-                norm_layer = normalization_resolver(
-                    norm,
-                    hidden_channels,
-                    **(norm_kwargs or {}),
-                )
-            else:
-                norm_layer = Identity()
-            self.norms.append(norm_layer)
-
-        self.reset_parameters()
-
-    @property
-    def in_channels(self) -> int:
-        r"""Size of each input sample."""
-        return self.channel_list[0]
-
-    @property
-    def out_channels(self) -> int:
-        r"""Size of each output sample."""
-        return self.channel_list[-1]
-
-    @property
-    def num_layers(self) -> int:
-        r"""The number of layers."""
-        return len(self.channel_list) - 1
-
-    def reset_parameters(self):
-        r"""Resets all learnable parameters of the module."""
-        for lin in self.lins:
-            lin.reset_parameters()
-        for norm in self.norms:
-            if hasattr(norm, 'reset_parameters'):
-                norm.reset_parameters()
-
-    def forward(
-        self,
-        x: Tensor,
-        return_emb: NoneType = None,
-    ) -> Tensor:
-        r"""
-        Args:
-            x (torch.Tensor): The source tensor.
-            return_emb (bool, optional): If set to :obj:`True`, will
-                additionally return the embeddings before execution of the
-                final output layer. (default: :obj:`False`)
-        """
-        for i, (lin, norm) in enumerate(zip(self.lins, self.norms)):
-            x = lin(x)
-            if self.act is not None and self.act_first:
-                x = self.act(x)
-            x = norm(x)
-            if self.act is not None and not self.act_first:
-                x = self.act(x)
-            x = F.dropout(x, p=self.dropout[i], training=self.training)
-            emb = x
-
-        if self.plain_last:
-            x = self.lins[-1](x)
-            x = F.dropout(x, p=self.dropout[-1], training=self.training)
-
-        return (x, emb) if isinstance(return_emb, bool) else x
-
-    def __repr__(self) -> str:
-        return f'{self.__class__.__name__}({str(self.channel_list)[1:-1]})'
diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/models/node2vec.py b/pytorch_geometric-2.3.1/torch_geometric/nn/models/node2vec.py
deleted file mode 100644
index 9d52543..0000000
--- a/pytorch_geometric-2.3.1/torch_geometric/nn/models/node2vec.py
+++ /dev/null
@@ -1,185 +0,0 @@
-from typing import Optional, Tuple
-
-import torch
-from torch import Tensor
-from torch.nn import Embedding
-from torch.utils.data import DataLoader
-
-from torch_geometric.typing import OptTensor, SparseTensor
-from torch_geometric.utils.num_nodes import maybe_num_nodes
-
-try:
-    import torch_cluster  # noqa
-    random_walk = torch.ops.torch_cluster.random_walk
-except ImportError:
-    random_walk = None
-
-
-class Node2Vec(torch.nn.Module):
-    r"""The Node2Vec model from the
-    `"node2vec: Scalable Feature Learning for Networks"
-    `_ paper, where random walks of
-    length :obj:`walk_length` are sampled in a given graph, and node embeddings
-    are learned via negative sampling optimization.
-
-    ..
note:: - - For an example of using Node2Vec, see `examples/node2vec.py - `_. - - Args: - edge_index (torch.Tensor): The edge indices. - embedding_dim (int): The size of each embedding vector. - walk_length (int): The walk length. - context_size (int): The actual context size which is considered for - positive samples. This parameter increases the effective sampling - rate by reusing samples across different source nodes. - walks_per_node (int, optional): The number of walks to sample for each - node. (default: :obj:`1`) - p (float, optional): Likelihood of immediately revisiting a node in the - walk. (default: :obj:`1`) - q (float, optional): Control parameter to interpolate between - breadth-first strategy and depth-first strategy. (default: :obj:`1`) - num_negative_samples (int, optional): The number of negative samples to - use for each positive sample. (default: :obj:`1`) - num_nodes (int, optional): The number of nodes. (default: :obj:`None`) - sparse (bool, optional): If set to :obj:`True`, gradients w.r.t. the - weight matrix will be sparse. (default: :obj:`False`) - """ - def __init__( - self, - edge_index: Tensor, - embedding_dim: int, - walk_length: int, - context_size: int, - walks_per_node: int = 1, - p: float = 1.0, - q: float = 1.0, - num_negative_samples: int = 1, - num_nodes: Optional[int] = None, - sparse: bool = False, - ): - super().__init__() - - if random_walk is None: - raise ImportError('`Node2Vec` requires `torch-cluster`.') - - N = maybe_num_nodes(edge_index, num_nodes) - row, col = edge_index - self.adj = SparseTensor(row=row, col=col, sparse_sizes=(N, N)) - self.adj = self.adj.to('cpu') - self.EPS = 1e-15 - assert walk_length >= context_size - - self.embedding_dim = embedding_dim - self.walk_length = walk_length - 1 - self.context_size = context_size - self.walks_per_node = walks_per_node - self.p = p - self.q = q - self.num_negative_samples = num_negative_samples - - self.embedding = Embedding(N, embedding_dim, sparse=sparse) - - self.reset_parameters() - - def reset_parameters(self): - r"""Resets all learnable parameters of the module.""" - self.embedding.reset_parameters() - - def forward(self, batch: OptTensor = None) -> Tensor: - """Returns the embeddings for the nodes in :obj:`batch`.""" - emb = self.embedding.weight - return emb if batch is None else emb.index_select(0, batch) - - def loader(self, **kwargs) -> DataLoader: - return DataLoader(range(self.adj.sparse_size(0)), - collate_fn=self.sample, **kwargs) - - @torch.jit.export - def pos_sample(self, batch: Tensor) -> Tensor: - batch = batch.repeat(self.walks_per_node) - rowptr, col, _ = self.adj.csr() - rw = random_walk(rowptr, col, batch, self.walk_length, self.p, self.q) - if not isinstance(rw, Tensor): - rw = rw[0] - - walks = [] - num_walks_per_rw = 1 + self.walk_length + 1 - self.context_size - for j in range(num_walks_per_rw): - walks.append(rw[:, j:j + self.context_size]) - return torch.cat(walks, dim=0) - - @torch.jit.export - def neg_sample(self, batch: Tensor) -> Tensor: - batch = batch.repeat(self.walks_per_node * self.num_negative_samples) - - rw = torch.randint(self.adj.sparse_size(0), - (batch.size(0), self.walk_length)) - rw = torch.cat([batch.view(-1, 1), rw], dim=-1) - - walks = [] - num_walks_per_rw = 1 + self.walk_length + 1 - self.context_size - for j in range(num_walks_per_rw): - walks.append(rw[:, j:j + self.context_size]) - return torch.cat(walks, dim=0) - - @torch.jit.export - def sample(self, batch: Tensor) -> Tuple[Tensor, Tensor]: - if not isinstance(batch, Tensor): - batch
= torch.tensor(batch) - return self.pos_sample(batch), self.neg_sample(batch) - - @torch.jit.export - def loss(self, pos_rw: Tensor, neg_rw: Tensor) -> Tensor: - r"""Computes the loss given positive and negative random walks.""" - - # Positive loss. - start, rest = pos_rw[:, 0], pos_rw[:, 1:].contiguous() - - h_start = self.embedding(start).view(pos_rw.size(0), 1, - self.embedding_dim) - h_rest = self.embedding(rest.view(-1)).view(pos_rw.size(0), -1, - self.embedding_dim) - - out = (h_start * h_rest).sum(dim=-1).view(-1) - pos_loss = -torch.log(torch.sigmoid(out) + self.EPS).mean() - - # Negative loss. - start, rest = neg_rw[:, 0], neg_rw[:, 1:].contiguous() - - h_start = self.embedding(start).view(neg_rw.size(0), 1, - self.embedding_dim) - h_rest = self.embedding(rest.view(-1)).view(neg_rw.size(0), -1, - self.embedding_dim) - - out = (h_start * h_rest).sum(dim=-1).view(-1) - neg_loss = -torch.log(1 - torch.sigmoid(out) + self.EPS).mean() - - return pos_loss + neg_loss - - def test( - self, - train_z: Tensor, - train_y: Tensor, - test_z: Tensor, - test_y: Tensor, - solver: str = 'lbfgs', - multi_class: str = 'auto', - *args, - **kwargs, - ) -> float: - r"""Evaluates latent space quality via a logistic regression downstream - task.""" - from sklearn.linear_model import LogisticRegression - - clf = LogisticRegression(solver=solver, multi_class=multi_class, *args, - **kwargs).fit(train_z.detach().cpu().numpy(), - train_y.detach().cpu().numpy()) - return clf.score(test_z.detach().cpu().numpy(), - test_y.detach().cpu().numpy()) - - def __repr__(self) -> str: - return (f'{self.__class__.__name__}({self.embedding.weight.size(0)}, ' - f'{self.embedding.weight.size(1)})') diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/models/rect.py b/pytorch_geometric-2.3.1/torch_geometric/nn/models/rect.py deleted file mode 100644 index 6f6a772..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/models/rect.py +++ /dev/null @@ -1,152 +0,0 @@ -import torch -import torch.nn.functional as F -from torch import Tensor -from torch.nn import Linear - -from torch_geometric.nn import GCNConv -from torch_geometric.typing import Adj, OptTensor, SparseTensor -from torch_geometric.utils import scatter - - -class RECT_L(torch.nn.Module): - r"""The RECT model, *i.e.* its supervised RECT-L part, from the - `"Network Embedding with Completely-imbalanced Labels" - `_ paper. - In particular, a GCN model is trained that reconstructs semantic class - knowledge. - - .. note:: - - For an example of using RECT, see `examples/rect.py - `_. - - Args: - in_channels (int): Size of each input sample. - hidden_channels (int): Intermediate size of each sample. - normalize (bool, optional): Whether to add self-loops and compute - symmetric normalization coefficients on-the-fly. - (default: :obj:`True`) - dropout (float, optional): The dropout probability. 
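As a compact end-to-end sketch of the Node2Vec module defined above (toy 4-node cycle, arbitrary hyperparameters; `torch-cluster` must be installed for the random walks):

```
import torch
from torch_geometric.nn import Node2Vec

edge_index = torch.tensor([[0, 1, 2, 3], [1, 2, 3, 0]])  # directed cycle

model = Node2Vec(edge_index, embedding_dim=8, walk_length=4,
                 context_size=2, walks_per_node=2)
loader = model.loader(batch_size=4, shuffle=True)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

model.train()
for pos_rw, neg_rw in loader:
    optimizer.zero_grad()
    loss = model.loss(pos_rw, neg_rw)  # skip-gram loss over sampled walks
    loss.backward()
    optimizer.step()

z = model()  # learned embeddings, shape [4, 8]
```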
- (default: :obj:`0.0`) - """ - def __init__(self, in_channels: int, hidden_channels: int, - normalize: bool = True, dropout: float = 0.0): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.dropout = dropout - - self.conv = GCNConv(in_channels, hidden_channels, normalize=normalize) - self.lin = Linear(hidden_channels, in_channels) - - self.reset_parameters() - - def reset_parameters(self): - r"""Resets all learnable parameters of the module.""" - self.conv.reset_parameters() - self.lin.reset_parameters() - torch.nn.init.xavier_uniform_(self.lin.weight.data) - - @torch.jit._overload_method - def forward(self, x, edge_index, edge_weight=None): - # type: (Tensor, SparseTensor, OptTensor) -> Tensor - pass - - @torch.jit._overload_method - def forward(self, x, edge_index, edge_weight=None): - # type: (Tensor, Tensor, OptTensor) -> Tensor - pass - - def forward(self, x: Tensor, edge_index: Adj, - edge_weight: OptTensor = None) -> Tensor: - """""" - x = self.conv(x, edge_index, edge_weight) - x = F.dropout(x, p=self.dropout, training=self.training) - return self.lin(x) - - @torch.jit._overload_method - def embed(self, x, edge_index, edge_weight=None): - # type: (Tensor, SparseTensor, OptTensor) -> Tensor - pass - - @torch.jit._overload_method - def embed(self, x, edge_index, edge_weight=None): - # type: (Tensor, Tensor, OptTensor) -> Tensor - pass - - def embed(self, x: Tensor, edge_index: Adj, - edge_weight: OptTensor = None) -> Tensor: - with torch.no_grad(): - return self.conv(x, edge_index, edge_weight) - - def get_semantic_labels(self, x: Tensor, y: Tensor, - mask: Tensor) -> Tensor: - r"""Replaces the original labels by their class-centers.""" - with torch.no_grad(): - y = y[mask] - mean = scatter(x[mask], y, dim=0, reduce='mean') - return mean[y] - - def jittable(self, typing: str) -> torch.nn.Module: # pragma: no cover - edge_index_type = typing.split(',')[1].strip() - - class EdgeIndexJittable(torch.nn.Module): - def __init__(self, child): - super().__init__() - self.child = child - - def reset_parameters(self): - self.child.reset_parameters() - - def forward(self, x: Tensor, edge_index: Tensor, - edge_weight: OptTensor = None) -> Tensor: - return self.child(x, edge_index, edge_weight) - - @torch.jit.export - def embed(self, x: Tensor, edge_index: Tensor, - edge_weight: OptTensor = None) -> Tensor: - return self.child.embed(x, edge_index, edge_weight) - - @torch.jit.export - def get_semantic_labels(self, x: Tensor, y: Tensor, - mask: Tensor) -> Tensor: - return self.child.get_semantic_labels(x, y, mask) - - class SparseTensorJittable(torch.nn.Module): - def __init__(self, child): - super().__init__() - self.child = child - - def reset_parameters(self): - self.child.reset_parameters() - - def forward(self, x: Tensor, edge_index: SparseTensor, - edge_weight: OptTensor = None): - return self.child(x, edge_index, edge_weight) - - @torch.jit.export - def embed(self, x: Tensor, edge_index: SparseTensor, - edge_weight: OptTensor = None) -> Tensor: - return self.child.embed(x, edge_index, edge_weight) - - @torch.jit.export - def get_semantic_labels(self, x: Tensor, y: Tensor, - mask: Tensor) -> Tensor: - return self.child.get_semantic_labels(x, y, mask) - - if self.conv.jittable is not None: - self.conv = self.conv.jittable() - - if 'Tensor' == edge_index_type: - jittable_module = EdgeIndexJittable(self) - elif 'SparseTensor' == edge_index_type: - jittable_module = SparseTensorJittable(self) - else: - raise ValueError(f"Could not parse types 
'{typing}'") - - return jittable_module - - def __repr__(self) -> str: - return (f'{self.__class__.__name__}({self.in_channels}, ' - f'{self.hidden_channels})') diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/models/rev_gnn.py b/pytorch_geometric-2.3.1/torch_geometric/nn/models/rev_gnn.py deleted file mode 100644 index 662457b..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/models/rev_gnn.py +++ /dev/null @@ -1,318 +0,0 @@ -import copy -from abc import ABC, abstractmethod -from typing import Any, List, Optional, Union - -import numpy as np -import torch -from torch import Tensor - -import torch_geometric.typing -from torch_geometric.typing import Adj - - -class InvertibleFunction(torch.autograd.Function): - r"""An invertible autograd function. This allows for automatic - backpropagation in a reversible fashion so that the memory of intermediate - results can be freed during the forward pass and be constructed on-the-fly - during the backward pass. - - Args: - ctx (torch.autograd.function.InvertibleFunctionBackward): - A context object that can be used to stash information for backward - computation. - fn (torch.nn.Module): The forward function. - fn_inverse (torch.nn.Module): The inverse function to recompute the - freed input. - num_bwd_passes (int): Number of backward passes to retain a link - with the output. After the last backward pass the output is - discarded and memory is freed. - num_inputs (int): The number of inputs to the forward function. - *args (tuple): Inputs and weights. - """ - @staticmethod - def forward(ctx, fn: torch.nn.Module, fn_inverse: torch.nn.Module, - num_bwd_passes: int, num_inputs: int, *args): - ctx.fn = fn - ctx.fn_inverse = fn_inverse - ctx.weights = args[num_inputs:] - ctx.num_bwd_passes = num_bwd_passes - ctx.num_inputs = num_inputs - inputs = args[:num_inputs] - ctx.input_requires_grad = [] - - with torch.no_grad(): # Make a detached copy which shares the storage: - x = [] - for element in inputs: - if isinstance(element, torch.Tensor): - x.append(element.detach()) - ctx.input_requires_grad.append(element.requires_grad) - else: - x.append(element) - ctx.input_requires_grad.append(None) - outputs = ctx.fn(*x) - - if not isinstance(outputs, tuple): - outputs = (outputs, ) - - # Detaches outputs in-place, allows discarding the intermediate result: - detached_outputs = tuple(element.detach_() for element in outputs) - - # Clear memory of node features: - if torch_geometric.typing.WITH_PT2: - inputs[0].untyped_storage().resize_(0) - else: # pragma: no cover - inputs[0].storage().resize_(0) - - # Store these tensor nodes for backward passes: - ctx.inputs = [inputs] * num_bwd_passes - ctx.outputs = [detached_outputs] * num_bwd_passes - - return detached_outputs - - @staticmethod - def backward(ctx, *grad_outputs): - if len(ctx.outputs) == 0: - raise RuntimeError( - f"Trying to perform a backward pass on the " - f"'InvertibleFunction' for more than '{ctx.num_bwd_passes}' " - f"times.
Try raising 'num_bwd_passes'.") - - inputs = ctx.inputs.pop() - outputs = ctx.outputs.pop() - - # Recompute input by swapping out the first argument: - with torch.no_grad(): - inputs_inverted = ctx.fn_inverse(*(outputs + inputs[1:])) - if len(ctx.outputs) == 0: # Clear memory from outputs: - for element in outputs: - if torch_geometric.typing.WITH_PT2: - element.untyped_storage().resize_(0) - else: # pragma: no cover - element.storage().resize_(0) - - if not isinstance(inputs_inverted, tuple): - inputs_inverted = (inputs_inverted, ) - - for elem_orig, elem_inv in zip(inputs, inputs_inverted): - if torch_geometric.typing.WITH_PT2: - elem_orig.untyped_storage().resize_( - int(np.prod(elem_orig.size())) * - elem_orig.element_size()) - else: # pragma: no cover - elem_orig.storage().resize_(int(np.prod(elem_orig.size()))) - elem_orig.set_(elem_inv) - - # Compute gradients with grad enabled: - with torch.set_grad_enabled(True): - detached_inputs = [] - for element in inputs: - if isinstance(element, torch.Tensor): - detached_inputs.append(element.detach()) - else: - detached_inputs.append(element) - detached_inputs = tuple(detached_inputs) - for x, req_grad in zip(detached_inputs, ctx.input_requires_grad): - if isinstance(x, torch.Tensor): - x.requires_grad = req_grad - tmp_output = ctx.fn(*detached_inputs) - - if not isinstance(tmp_output, tuple): - tmp_output = (tmp_output, ) - - filtered_detached_inputs = tuple( - filter( - lambda x: x.requires_grad - if isinstance(x, torch.Tensor) else False, - detached_inputs, - )) - gradients = torch.autograd.grad( - outputs=tmp_output, - inputs=filtered_detached_inputs + ctx.weights, - grad_outputs=grad_outputs, - ) - - input_gradients = [] - i = 0 - for rg in ctx.input_requires_grad: - if rg: - input_gradients.append(gradients[i]) - i += 1 - else: - input_gradients.append(None) - - gradients = tuple(input_gradients) + gradients[-len(ctx.weights):] - - return (None, None, None, None) + gradients - - -class InvertibleModule(torch.nn.Module, ABC): - r"""An abstract class for implementing invertible modules. - - Args: - disable (bool, optional): If set to :obj:`True`, will disable the usage - of :class:`InvertibleFunction` and will execute the module without - memory savings. (default: :obj:`False`) - num_bwd_passes (int, optional): Number of backward passes to retain a - link with the output. After the last backward pass the output is - discarded and memory is freed. (default: :obj:`1`) - """ - def __init__(self, disable: bool = False, num_bwd_passes: int = 1): - super().__init__() - self.disable = disable - self.num_bwd_passes = num_bwd_passes - - def forward(self, *args): - """""" - return self._fn_apply(args, self._forward, self._inverse) - - def inverse(self, *args): - return self._fn_apply(args, self._inverse, self._forward) - - @abstractmethod - def _forward(self): - pass - - @abstractmethod - def _inverse(self): - pass - - def _fn_apply(self, args, fn, fn_inverse): - if not self.disable: - out = InvertibleFunction.apply( - fn, - fn_inverse, - self.num_bwd_passes, - len(args), - *args, - *tuple(p for p in self.parameters() if p.requires_grad), - ) - else: - out = fn(*args) - - # If the layer only has one input, we unpack the tuple: - if isinstance(out, tuple) and len(out) == 1: - return out[0] - - return out - - -class GroupAddRev(InvertibleModule): - r"""The Grouped Reversible GNN module from the `"Graph Neural Networks with - 1000 Layers" `_ paper. 
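A minimal usage sketch for the module described above, assuming `SAGEConv` as the seed GNN (any conv whose input and output widths equal the per-group width would do; both classes are importable from `torch_geometric.nn` in PyG 2.x):

```
import torch
from torch_geometric.nn import GroupAddRev, SAGEConv

# 32 channels split into 4 groups of 8; the seed conv must map 8 -> 8.
conv = SAGEConv(8, 8)
layer = GroupAddRev(conv, num_groups=4)

x = torch.randn(10, 32)
edge_index = torch.tensor([[0, 1, 2], [1, 2, 0]])
out = layer(x, edge_index)  # inputs are freed and recomputed on backward
assert out.shape == x.shape
```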
- This module enables training of arbitrarily deep GNNs with a memory complexity - independent of the number of layers. - - It does so by partitioning input node features :math:`\mathbf{X}` into - :math:`C` groups across the feature dimension. Then, a grouped reversible - GNN block :math:`f_{\theta(i)}` operates on a group of inputs and produces - a group of outputs: - - .. math:: - - \mathbf{X}^{\prime}_0 &= \sum_{i=2}^C \mathbf{X}_i - - \mathbf{X}^{\prime}_i &= f_{\theta(i)} ( \mathbf{X}^{\prime}_{i - 1}, - \mathbf{A}) + \mathbf{X}_i - - for all :math:`i \in \{ 1, \ldots, C \}`. - - .. note:: - - For an example of using :class:`GroupAddRev`, see `examples/rev_gnn.py - `_. - - Args: - conv (torch.nn.Module or torch.nn.ModuleList): A seed GNN. The input - and output feature dimensions need to match. - split_dim (int, optional): The dimension across which to split groups. - (default: :obj:`-1`) - num_groups (int, optional): The number of groups :math:`C`. - (default: :obj:`None`) - disable (bool, optional): If set to :obj:`True`, will disable the usage - of :class:`InvertibleFunction` and will execute the module without - memory savings. (default: :obj:`False`) - num_bwd_passes (int, optional): Number of backward passes to retain a - link with the output. After the last backward pass the output is - discarded and memory is freed. (default: :obj:`1`) - """ - def __init__( - self, - conv: Union[torch.nn.Module, torch.nn.ModuleList], - split_dim: int = -1, - num_groups: Optional[int] = None, - disable: bool = False, - num_bwd_passes: int = 1, - ): - super().__init__(disable, num_bwd_passes) - self.split_dim = split_dim - - if isinstance(conv, torch.nn.ModuleList): - self.convs = conv - else: - assert num_groups is not None, "Please specify 'num_groups'" - self.convs = torch.nn.ModuleList([conv]) - for i in range(num_groups - 1): - conv = copy.deepcopy(self.convs[0]) - if hasattr(conv, 'reset_parameters'): - conv.reset_parameters() - self.convs.append(conv) - - if len(self.convs) < 2: - raise ValueError(f"The number of groups should not be smaller " - f"than '2' (got '{self.num_groups}')") - - @property - def num_groups(self) -> int: - return len(self.convs) - - def reset_parameters(self): - r"""Resets all learnable parameters of the module.""" - for conv in self.convs: - conv.reset_parameters() - - def _forward(self, x: Tensor, edge_index: Adj, *args): - channels = x.size(self.split_dim) - xs = self._chunk(x, channels) - args = list(zip(*[self._chunk(arg, channels) for arg in args])) - args = [[]] * self.num_groups if len(args) == 0 else args - - ys = [] - y_in = sum(xs[1:]) - for i in range(self.num_groups): - y_in = xs[i] + self.convs[i](y_in, edge_index, *args[i]) - ys.append(y_in) - return torch.cat(ys, dim=self.split_dim) - - def _inverse(self, y: Tensor, edge_index: Adj, *args): - channels = y.size(self.split_dim) - ys = self._chunk(y, channels) - args = list(zip(*[self._chunk(arg, channels) for arg in args])) - args = [[]] * self.num_groups if len(args) == 0 else args - - xs = [] - for i in range(self.num_groups - 1, -1, -1): - if i != 0: - y_in = ys[i - 1] - else: - y_in = sum(xs) - x = ys[i] - self.convs[i](y_in, edge_index, *args[i]) - xs.append(x) - - return torch.cat(xs[::-1], dim=self.split_dim) - - def _chunk(self, x: Any, channels: int) -> List[Any]: - if not isinstance(x, Tensor): - return [x] * self.num_groups - - try: - if x.size(self.split_dim) != channels: - return [x] * self.num_groups - except IndexError: - return [x] * self.num_groups - - return torch.chunk(x,
self.num_groups, dim=self.split_dim) - - def __repr__(self) -> str: - return (f'{self.__class__.__name__}({self.convs[0]}, ' - f'num_groups={self.num_groups})') diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/models/tgn.py b/pytorch_geometric-2.3.1/torch_geometric/nn/models/tgn.py deleted file mode 100644 index 67a1889..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/models/tgn.py +++ /dev/null @@ -1,289 +0,0 @@ -import copy -from typing import Callable, Dict, Tuple - -import torch -from torch import Tensor -from torch.nn import GRUCell, Linear - -from torch_geometric.nn.inits import zeros -from torch_geometric.utils import scatter - -TGNMessageStoreType = Dict[int, Tuple[Tensor, Tensor, Tensor, Tensor]] - - -class TGNMemory(torch.nn.Module): - r"""The Temporal Graph Network (TGN) memory model from the - `"Temporal Graph Networks for Deep Learning on Dynamic Graphs" - `_ paper. - - .. note:: - - For an example of using TGN, see `examples/tgn.py - `_. - - Args: - num_nodes (int): The number of nodes to save memories for. - raw_msg_dim (int): The raw message dimensionality. - memory_dim (int): The hidden memory dimensionality. - time_dim (int): The time encoding dimensionality. - message_module (torch.nn.Module): The message function which - combines source and destination node memory embeddings, the raw - message and the time encoding. - aggregator_module (torch.nn.Module): The message aggregator function - which aggregates messages to the same destination into a single - representation. - """ - def __init__(self, num_nodes: int, raw_msg_dim: int, memory_dim: int, - time_dim: int, message_module: Callable, - aggregator_module: Callable): - super().__init__() - - self.num_nodes = num_nodes - self.raw_msg_dim = raw_msg_dim - self.memory_dim = memory_dim - self.time_dim = time_dim - - self.msg_s_module = message_module - self.msg_d_module = copy.deepcopy(message_module) - self.aggr_module = aggregator_module - self.time_enc = TimeEncoder(time_dim) - self.gru = GRUCell(message_module.out_channels, memory_dim) - - self.register_buffer('memory', torch.empty(num_nodes, memory_dim)) - last_update = torch.empty(self.num_nodes, dtype=torch.long) - self.register_buffer('last_update', last_update) - self.register_buffer('_assoc', torch.empty(num_nodes, - dtype=torch.long)) - - self.msg_s_store = {} - self.msg_d_store = {} - - self.reset_parameters() - - def reset_parameters(self): - r"""Resets all learnable parameters of the module.""" - if hasattr(self.msg_s_module, 'reset_parameters'): - self.msg_s_module.reset_parameters() - if hasattr(self.msg_d_module, 'reset_parameters'): - self.msg_d_module.reset_parameters() - if hasattr(self.aggr_module, 'reset_parameters'): - self.aggr_module.reset_parameters() - self.time_enc.reset_parameters() - self.gru.reset_parameters() - self.reset_state() - - def reset_state(self): - """Resets the memory to its initial state.""" - zeros(self.memory) - zeros(self.last_update) - self._reset_message_store() - - def detach(self): - """Detaches the memory from gradient computation.""" - self.memory.detach_() - - def forward(self, n_id: Tensor) -> Tuple[Tensor, Tensor]: - """Returns, for all nodes :obj:`n_id`, their current memory and their - last updated timestamp.""" - if self.training: - memory, last_update = self._get_updated_memory(n_id) - else: - memory, last_update = self.memory[n_id], self.last_update[n_id] - - return memory, last_update - - def update_state(self, src: Tensor, dst: Tensor, t: Tensor, - raw_msg: Tensor): - """Updates the memory with 
newly encountered interactions - :obj:`(src, dst, t, raw_msg)`.""" - n_id = torch.cat([src, dst]).unique() - - if self.training: - self._update_memory(n_id) - self._update_msg_store(src, dst, t, raw_msg, self.msg_s_store) - self._update_msg_store(dst, src, t, raw_msg, self.msg_d_store) - else: - self._update_msg_store(src, dst, t, raw_msg, self.msg_s_store) - self._update_msg_store(dst, src, t, raw_msg, self.msg_d_store) - self._update_memory(n_id) - - def _reset_message_store(self): - i = self.memory.new_empty((0, ), dtype=torch.long) - msg = self.memory.new_empty((0, self.raw_msg_dim)) - # Message store format: (src, dst, t, msg) - self.msg_s_store = {j: (i, i, i, msg) for j in range(self.num_nodes)} - self.msg_d_store = {j: (i, i, i, msg) for j in range(self.num_nodes)} - - def _update_memory(self, n_id: Tensor): - memory, last_update = self._get_updated_memory(n_id) - self.memory[n_id] = memory - self.last_update[n_id] = last_update - - def _get_updated_memory(self, n_id: Tensor) -> Tuple[Tensor, Tensor]: - self._assoc[n_id] = torch.arange(n_id.size(0), device=n_id.device) - - # Compute messages (src -> dst). - msg_s, t_s, src_s, dst_s = self._compute_msg(n_id, self.msg_s_store, - self.msg_s_module) - - # Compute messages (dst -> src). - msg_d, t_d, src_d, dst_d = self._compute_msg(n_id, self.msg_d_store, - self.msg_d_module) - - # Aggregate messages. - idx = torch.cat([src_s, src_d], dim=0) - msg = torch.cat([msg_s, msg_d], dim=0) - t = torch.cat([t_s, t_d], dim=0) - aggr = self.aggr_module(msg, self._assoc[idx], t, n_id.size(0)) - - # Get local copy of updated memory. - memory = self.gru(aggr, self.memory[n_id]) - - # Get local copy of updated `last_update`. - dim_size = self.last_update.size(0) - last_update = scatter(t, idx, 0, dim_size, reduce='max')[n_id] - - return memory, last_update - - def _update_msg_store(self, src: Tensor, dst: Tensor, t: Tensor, - raw_msg: Tensor, msg_store: TGNMessageStoreType): - n_id, perm = src.sort() - n_id, count = n_id.unique_consecutive(return_counts=True) - for i, idx in zip(n_id.tolist(), perm.split(count.tolist())): - msg_store[i] = (src[idx], dst[idx], t[idx], raw_msg[idx]) - - def _compute_msg(self, n_id: Tensor, msg_store: TGNMessageStoreType, - msg_module: Callable): - data = [msg_store[i] for i in n_id.tolist()] - src, dst, t, raw_msg = list(zip(*data)) - src = torch.cat(src, dim=0) - dst = torch.cat(dst, dim=0) - t = torch.cat(t, dim=0) - raw_msg = torch.cat(raw_msg, dim=0) - t_rel = t - self.last_update[src] - t_enc = self.time_enc(t_rel.to(raw_msg.dtype)) - - msg = msg_module(self.memory[src], self.memory[dst], raw_msg, t_enc) - - return msg, t, src, dst - - def train(self, mode: bool = True): - """Sets the module in training mode.""" - if self.training and not mode: - # Flush message store to memory in case we just entered eval mode. 
- self._update_memory( - torch.arange(self.num_nodes, device=self.memory.device)) - self._reset_message_store() - super().train(mode) - - -class IdentityMessage(torch.nn.Module): - def __init__(self, raw_msg_dim: int, memory_dim: int, time_dim: int): - super().__init__() - self.out_channels = raw_msg_dim + 2 * memory_dim + time_dim - - def forward(self, z_src: Tensor, z_dst: Tensor, raw_msg: Tensor, - t_enc: Tensor): - return torch.cat([z_src, z_dst, raw_msg, t_enc], dim=-1) - - -class LastAggregator(torch.nn.Module): - def forward(self, msg: Tensor, index: Tensor, t: Tensor, dim_size: int): - from torch_scatter import scatter_max - _, argmax = scatter_max(t, index, dim=0, dim_size=dim_size) - out = msg.new_zeros((dim_size, msg.size(-1))) - mask = argmax < msg.size(0) # Filter items with at least one entry. - out[mask] = msg[argmax[mask]] - return out - - -class MeanAggregator(torch.nn.Module): - def forward(self, msg: Tensor, index: Tensor, t: Tensor, dim_size: int): - return scatter(msg, index, dim=0, dim_size=dim_size, reduce='mean') - - -class TimeEncoder(torch.nn.Module): - def __init__(self, out_channels: int): - super().__init__() - self.out_channels = out_channels - self.lin = Linear(1, out_channels) - - def reset_parameters(self): - self.lin.reset_parameters() - - def forward(self, t: Tensor) -> Tensor: - return self.lin(t.view(-1, 1)).cos() - - -class LastNeighborLoader(object): - def __init__(self, num_nodes: int, size: int, device=None): - self.size = size - - self.neighbors = torch.empty((num_nodes, size), dtype=torch.long, - device=device) - self.e_id = torch.empty((num_nodes, size), dtype=torch.long, - device=device) - self._assoc = torch.empty(num_nodes, dtype=torch.long, device=device) - - self.reset_state() - - def __call__(self, n_id: Tensor) -> Tuple[Tensor, Tensor, Tensor]: - neighbors = self.neighbors[n_id] - nodes = n_id.view(-1, 1).repeat(1, self.size) - e_id = self.e_id[n_id] - - # Filter invalid neighbors (identified by `e_id < 0`). - mask = e_id >= 0 - neighbors, nodes, e_id = neighbors[mask], nodes[mask], e_id[mask] - - # Relabel node indices. - n_id = torch.cat([n_id, neighbors]).unique() - self._assoc[n_id] = torch.arange(n_id.size(0), device=n_id.device) - neighbors, nodes = self._assoc[neighbors], self._assoc[nodes] - - return n_id, torch.stack([neighbors, nodes]), e_id - - def insert(self, src: Tensor, dst: Tensor): - # Inserts newly encountered interactions into an ever-growing - # (undirected) temporal graph. - - # Collect central nodes, their neighbors and the current event ids. - neighbors = torch.cat([src, dst], dim=0) - nodes = torch.cat([dst, src], dim=0) - e_id = torch.arange(self.cur_e_id, self.cur_e_id + src.size(0), - device=src.device).repeat(2) - self.cur_e_id += src.numel() - - # Convert newly encountered interaction ids so that they point to - # locations of a "dense" format of shape [num_nodes, size]. - nodes, perm = nodes.sort() - neighbors, e_id = neighbors[perm], e_id[perm] - - n_id = nodes.unique() - self._assoc[n_id] = torch.arange(n_id.numel(), device=n_id.device) - - dense_id = torch.arange(nodes.size(0), device=nodes.device) % self.size - dense_id += self._assoc[nodes].mul_(self.size) - - dense_e_id = e_id.new_full((n_id.numel() * self.size, ), -1) - dense_e_id[dense_id] = e_id - dense_e_id = dense_e_id.view(-1, self.size) - - dense_neighbors = e_id.new_empty(n_id.numel() * self.size) - dense_neighbors[dense_id] = neighbors - dense_neighbors = dense_neighbors.view(-1, self.size) - - # Collect new and old interactions... 
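A minimal sketch of wiring `TGNMemory` with the `IdentityMessage` and `LastAggregator` modules defined above (toy sizes and interactions; import paths follow `torch_geometric.nn.models.tgn` as in PyG 2.3.x):

```
import torch
from torch_geometric.nn.models.tgn import (IdentityMessage, LastAggregator,
                                           TGNMemory)

memory = TGNMemory(
    num_nodes=100, raw_msg_dim=16, memory_dim=32, time_dim=8,
    message_module=IdentityMessage(16, 32, 8),
    aggregator_module=LastAggregator(),
)

# A tiny batch of timestamped interactions (src, dst, t, raw_msg):
src = torch.tensor([0, 1])
dst = torch.tensor([1, 2])
t = torch.tensor([10, 20])
raw_msg = torch.randn(2, 16)

z, last_update = memory(torch.tensor([0, 1, 2]))  # read current state
memory.update_state(src, dst, t, raw_msg)         # then ingest the batch
```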
- e_id = torch.cat([self.e_id[n_id, :self.size], dense_e_id], dim=-1) - neighbors = torch.cat( - [self.neighbors[n_id, :self.size], dense_neighbors], dim=-1) - - # And sort them based on `e_id`. - e_id, perm = e_id.topk(self.size, dim=-1) - self.e_id[n_id] = e_id - self.neighbors[n_id] = torch.gather(neighbors, 1, perm) - - def reset_state(self): - self.cur_e_id = 0 - self.e_id.fill_(-1) diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/module_dict.py b/pytorch_geometric-2.3.1/torch_geometric/nn/module_dict.py deleted file mode 100644 index 09c3cab..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/module_dict.py +++ /dev/null @@ -1,45 +0,0 @@ -from typing import Iterable, Mapping, Optional, Tuple - -import torch -from torch.nn import Module - - -# `torch.nn.ModuleDict` doesn't allow `.` to be used in key names. -# This `ModuleDict` will support it by converting the `.` to `#` in the -# internal representation and converts it back to `.` in the external -# representation. -class ModuleDict(torch.nn.ModuleDict): - def __init__(self, modules: Optional[Mapping[str, Module]] = None): - if modules is not None: # Replace the keys in modules: - modules = { - self.to_internal_key(key): module - for key, module in modules.items() - } - super().__init__(modules) - - @staticmethod - def to_internal_key(key: str) -> str: - return key.replace(".", "#") - - @staticmethod - def to_external_key(key: str) -> str: - return key.replace("#", ".") - - def __getitem__(self, key: str) -> Module: - return super().__getitem__(self.to_internal_key(key)) - - def __setitem__(self, key: str, module: Module): - return super().__setitem__(self.to_internal_key(key), module) - - def __delitem__(self, key: str): - return super().__delitem__(self.to_internal_key(key)) - - def __contains__(self, key: str) -> bool: - return super().__contains__(self.to_internal_key(key)) - - def keys(self) -> Iterable[str]: - return [self.to_external_key(key) for key in super().keys()] - - def items(self) -> Iterable[Tuple[str, Module]]: - return [(self.to_external_key(key), value) - for key, value in super().items()] diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/norm/graph_size_norm.py b/pytorch_geometric-2.3.1/torch_geometric/nn/norm/graph_size_norm.py deleted file mode 100644 index 6bf8899..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/norm/graph_size_norm.py +++ /dev/null @@ -1,36 +0,0 @@ -import torch -import torch.nn as nn -from torch import Tensor - -from torch_geometric.typing import OptTensor -from torch_geometric.utils import degree - - -class GraphSizeNorm(nn.Module): - r"""Applies Graph Size Normalization over each individual graph in a batch - of node features as described in the - `"Benchmarking Graph Neural Networks" `_ - paper - - .. math:: - \mathbf{x}^{\prime}_i = \frac{\mathbf{x}_i}{\sqrt{|\mathcal{V}|}} - """ - def __init__(self): - super().__init__() - - def forward(self, x: Tensor, batch: OptTensor = None) -> Tensor: - r""" - Args: - x (torch.Tensor): The source tensor. - batch (torch.Tensor, optional): The batch vector - :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns - each element to a specific example. 
(default: :obj:`None`) - """ - if batch is None: - batch = torch.zeros(x.size(0), dtype=torch.long, device=x.device) - - inv_sqrt_deg = degree(batch, dtype=x.dtype).pow(-0.5) - return x * inv_sqrt_deg.index_select(0, batch).view(-1, 1) - - def __repr__(self) -> str: - return f'{self.__class__.__name__}()' diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/norm/layer_norm.py b/pytorch_geometric-2.3.1/torch_geometric/nn/norm/layer_norm.py deleted file mode 100644 index 6dddcc6..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/norm/layer_norm.py +++ /dev/null @@ -1,176 +0,0 @@ -import torch -import torch.nn.functional as F -from torch import Tensor -from torch.nn import Parameter - -from torch_geometric.nn.inits import ones, zeros -from torch_geometric.typing import OptTensor -from torch_geometric.utils import degree, scatter - - -class LayerNorm(torch.nn.Module): - r"""Applies layer normalization over each individual example in a batch - of features as described in the `"Layer Normalization" - `_ paper - - .. math:: - \mathbf{x}^{\prime}_i = \frac{\mathbf{x} - - \textrm{E}[\mathbf{x}]}{\sqrt{\textrm{Var}[\mathbf{x}] + \epsilon}} - \odot \gamma + \beta - - The mean and standard-deviation are calculated across all nodes and all - node channels separately for each object in a mini-batch. - - Args: - in_channels (int): Size of each input sample. - eps (float, optional): A value added to the denominator for numerical - stability. (default: :obj:`1e-5`) - affine (bool, optional): If set to :obj:`True`, this module has - learnable affine parameters :math:`\gamma` and :math:`\beta`. - (default: :obj:`True`) - mode (str, optional): The normalization mode to use for layer - normalization (:obj:`"graph"` or :obj:`"node"`). If :obj:`"graph"` - is used, each graph will be considered as an element to be - normalized. If `"node"` is used, each node will be considered as - an element to be normalized. (default: :obj:`"graph"`) - """ - def __init__( - self, - in_channels: int, - eps: float = 1e-5, - affine: bool = True, - mode: str = 'graph', - ): - super().__init__() - - self.in_channels = in_channels - self.eps = eps - self.affine = affine - self.mode = mode - - if affine: - self.weight = Parameter(torch.Tensor(in_channels)) - self.bias = Parameter(torch.Tensor(in_channels)) - else: - self.register_parameter('weight', None) - self.register_parameter('bias', None) - - self.reset_parameters() - - def reset_parameters(self): - r"""Resets all learnable parameters of the module.""" - ones(self.weight) - zeros(self.bias) - - def forward(self, x: Tensor, batch: OptTensor = None) -> Tensor: - r""" - Args: - x (torch.Tensor): The source tensor. - batch (torch.Tensor, optional): The batch vector - :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns - each element to a specific example. (default: :obj:`None`) - """
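The mode dispatch follows below; first, a small sketch contrasting the two modes just documented (toy shapes; assumes `LayerNorm` is re-exported from `torch_geometric.nn`):

```
import torch
from torch_geometric.nn import LayerNorm

x = torch.randn(6, 16)                    # 6 nodes, 16 channels
batch = torch.tensor([0, 0, 0, 1, 1, 1])  # two graphs of 3 nodes each

out_graph = LayerNorm(16, mode='graph')(x, batch)  # statistics per graph
out_node = LayerNorm(16, mode='node')(x)           # statistics per node
assert out_graph.shape == out_node.shape == x.shape
```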
- if self.mode == 'graph': - if batch is None: - x = x - x.mean() - out = x / (x.std(unbiased=False) + self.eps) - - else: - batch_size = int(batch.max()) + 1 - - norm = degree(batch, batch_size, dtype=x.dtype).clamp_(min=1) - norm = norm.mul_(x.size(-1)).view(-1, 1) - - mean = scatter(x, batch, dim=0, dim_size=batch_size, - reduce='sum').sum(dim=-1, keepdim=True) / norm - - x = x - mean.index_select(0, batch) - - var = scatter(x * x, batch, dim=0, dim_size=batch_size, - reduce='sum').sum(dim=-1, keepdim=True) - var = var / norm - - out = x / (var + self.eps).sqrt().index_select(0, batch) - - if self.weight is not None and self.bias is not None: - out = out * self.weight + self.bias - - return out - - if self.mode == 'node': - return F.layer_norm(x, (self.in_channels, ), self.weight, - self.bias, self.eps) - - raise ValueError(f"Unknown normalization mode: {self.mode}") - - def __repr__(self): - return (f'{self.__class__.__name__}({self.in_channels}, ' - f'affine={self.affine}, mode={self.mode})') - - -class HeteroLayerNorm(torch.nn.Module): - r"""Applies layer normalization over each individual example in a batch - of heterogeneous features as described in the `"Layer Normalization" - `_ paper. - Compared to :class:`LayerNorm`, :class:`HeteroLayerNorm` applies - normalization individually for each node or edge type. - - Args: - in_channels (int): Size of each input sample. - num_types (int): The number of types. - eps (float, optional): A value added to the denominator for numerical - stability. (default: :obj:`1e-5`) - affine (bool, optional): If set to :obj:`True`, this module has - learnable affine parameters :math:`\gamma` and :math:`\beta`. - (default: :obj:`True`) - mode (str, optional): The normalization mode to use for layer - normalization (:obj:`"node"`). If `"node"` is used, each node will - be considered as an element to be normalized. - (default: :obj:`"node"`) - """ - def __init__( - self, - in_channels: int, - num_types: int, - eps: float = 1e-5, - affine: bool = True, - mode: str = 'node', - ): - super().__init__() - - self.in_channels = in_channels - self.num_types = num_types - self.eps = eps - self.affine = affine - - if affine: - self.weight = Parameter(torch.Tensor(num_types, in_channels)) - self.bias = Parameter(torch.Tensor(num_types, in_channels)) - else: - self.register_parameter('weight', None) - self.register_parameter('bias', None) - - self.reset_parameters() - - def reset_parameters(self): - r"""Resets all learnable parameters of the module.""" - if self.affine: - torch.nn.init.ones_(self.weight) - torch.nn.init.zeros_(self.bias) - - def forward(self, x: Tensor, type_vec: Tensor) -> Tensor: - r""" - Args: - x (torch.Tensor): The input features. - type_vec (torch.Tensor): A vector that maps each entry to a type.
- """ - out = F.layer_norm(x, (self.in_channels, ), None, None, self.eps) - - if self.affine: - out = out * self.weight[type_vec] + self.bias[type_vec] - - return out - - def __repr__(self) -> str: - return (f'{self.__class__.__name__}({self.in_channels}, ' - f'num_types={self.num_types})') diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/parameter_dict.py b/pytorch_geometric-2.3.1/torch_geometric/nn/parameter_dict.py deleted file mode 100644 index 1d8ebc5..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/parameter_dict.py +++ /dev/null @@ -1,42 +0,0 @@ -from typing import Iterable, Mapping, Optional - -import torch -from torch.nn import Parameter - - -# `torch.nn.ParameterDict` doesn't allow `.` to be used in key names. -# This `ParameterDict` will support it by converting the `.` to `#` in the -# internal representation and converts it back to `.` in the external -# representation. -class ParameterDict(torch.nn.ParameterDict): - def __init__(self, parameters: Optional[Mapping[str, Parameter]] = None): - # Replace the keys in modules. - if parameters: - parameters = { - self.to_internal_key(key): module - for key, module in parameters.items() - } - super().__init__(parameters) - - @staticmethod - def to_internal_key(key: str) -> str: - return key.replace(".", "#") - - @staticmethod - def to_external_key(key: str) -> str: - return key.replace("#", ".") - - def __getitem__(self, key: str) -> Parameter: - return super().__getitem__(self.to_internal_key(key)) - - def __setitem__(self, key: str, parameter: Parameter): - return super().__setitem__(self.to_internal_key(key), parameter) - - def __delitem__(self, key: str): - return super().__delitem__(self.to_internal_key(key)) - - def __contains__(self, key: str) -> bool: - return super().__contains__(self.to_internal_key(key)) - - def keys(self) -> Iterable[str]: - return [self.to_external_key(key) for key in super().keys()] diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/pool/__init__.py b/pytorch_geometric-2.3.1/torch_geometric/nn/pool/__init__.py deleted file mode 100644 index 4c986ac..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/pool/__init__.py +++ /dev/null @@ -1,273 +0,0 @@ -from torch import Tensor - -from torch_geometric.typing import OptTensor - -from .asap import ASAPooling -from .avg_pool import avg_pool, avg_pool_neighbor_x, avg_pool_x -from .edge_pool import EdgePooling -from .glob import global_add_pool, global_max_pool, global_mean_pool -from .graclus import graclus -from .max_pool import max_pool, max_pool_neighbor_x, max_pool_x -from .mem_pool import MemPooling -from .pan_pool import PANPooling -from .sag_pool import SAGPooling -from .topk_pool import TopKPooling -from .voxel_grid import voxel_grid - -try: - import torch_cluster -except ImportError: - torch_cluster = None - - -def fps(x: Tensor, batch: OptTensor = None, ratio: float = 0.5, - random_start: bool = True) -> Tensor: - r"""A sampling algorithm from the `"PointNet++: Deep Hierarchical Feature - Learning on Point Sets in a Metric Space" - `_ paper, which iteratively samples the - most distant point with regard to the rest points. - - .. code-block:: python - - import torch - from torch_geometric.nn import fps - - x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]]) - batch = torch.tensor([0, 0, 0, 0]) - index = fps(x, batch, ratio=0.5) - - Args: - x (torch.Tensor): Node feature matrix - :math:`\mathbf{X} \in \mathbb{R}^{N \times F}`. 
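Returning briefly to the `ModuleDict`/`ParameterDict` wrappers deleted above: the `.`-to-`#` key translation is invisible to callers, as this small sketch illustrates (the key name is hypothetical; the import path follows the file being removed here):

```
import torch
from torch_geometric.nn.module_dict import ModuleDict

# 'author.paper' would be rejected by a plain torch.nn.ModuleDict;
# the wrapper stores it internally as 'author#paper'.
modules = ModuleDict({'author.paper': torch.nn.Linear(8, 8)})

assert 'author.paper' in modules
assert list(modules.keys()) == ['author.paper']
lin = modules['author.paper']  # lookups translate transparently
```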
- batch (torch.Tensor, optional): Batch vector - :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each - node to a specific example. (default: :obj:`None`) - ratio (float, optional): Sampling ratio. (default: :obj:`0.5`) - random_start (bool, optional): If set to :obj:`False`, use the first - node in :math:`\mathbf{X}` as starting node. (default: :obj:`True`) - - :rtype: :class:`torch.Tensor` - """ - return torch_cluster.fps(x, batch, ratio, random_start) - - - def knn(x: Tensor, y: Tensor, k: int, batch_x: OptTensor = None, - batch_y: OptTensor = None, cosine: bool = False, - num_workers: int = 1) -> Tensor: - r"""Finds for each element in :obj:`y` the :obj:`k` nearest points in - :obj:`x`. - - .. code-block:: python - - import torch - from torch_geometric.nn import knn - - x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]]) - batch_x = torch.tensor([0, 0, 0, 0]) - y = torch.Tensor([[-1, 0], [1, 0]]) - batch_y = torch.tensor([0, 0]) - assign_index = knn(x, y, 2, batch_x, batch_y) - - Args: - x (torch.Tensor): Node feature matrix - :math:`\mathbf{X} \in \mathbb{R}^{N \times F}`. - y (torch.Tensor): Node feature matrix - :math:`\mathbf{Y} \in \mathbb{R}^{M \times F}`. - k (int): The number of neighbors. - batch_x (torch.Tensor, optional): Batch vector - :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each - node to a specific example. (default: :obj:`None`) - batch_y (torch.Tensor, optional): Batch vector - :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^M`, which assigns each - node to a specific example. (default: :obj:`None`) - cosine (bool, optional): If :obj:`True`, will use the cosine - distance instead of euclidean distance to find nearest neighbors. - (default: :obj:`False`) - num_workers (int, optional): Number of workers to use for computation. - Has no effect in case :obj:`batch_x` or :obj:`batch_y` is not - :obj:`None`, or the input lies on the GPU. (default: :obj:`1`) - - :rtype: :class:`torch.Tensor` - """ - return torch_cluster.knn(x, y, k, batch_x, batch_y, cosine, num_workers) - - - def knn_graph(x: Tensor, k: int, batch: OptTensor = None, loop: bool = False, - flow: str = 'source_to_target', cosine: bool = False, - num_workers: int = 1) -> Tensor: - r"""Computes graph edges to the nearest :obj:`k` points. - - .. code-block:: python - - import torch - from torch_geometric.nn import knn_graph - - x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]]) - batch = torch.tensor([0, 0, 0, 0]) - edge_index = knn_graph(x, k=2, batch=batch, loop=False) - - Args: - x (torch.Tensor): Node feature matrix - :math:`\mathbf{X} \in \mathbb{R}^{N \times F}`. - k (int): The number of neighbors. - batch (torch.Tensor, optional): Batch vector - :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each - node to a specific example. (default: :obj:`None`) - loop (bool, optional): If :obj:`True`, the graph will contain - self-loops. (default: :obj:`False`) - flow (str, optional): The flow direction when using in combination with - message passing (:obj:`"source_to_target"` or - :obj:`"target_to_source"`). (default: :obj:`"source_to_target"`) - cosine (bool, optional): If :obj:`True`, will use the cosine - distance instead of euclidean distance to find nearest neighbors. - (default: :obj:`False`) - num_workers (int, optional): Number of workers to use for computation. - Has no effect in case :obj:`batch` is not :obj:`None`, or the input - lies on the GPU. (default: :obj:`1`) - - :rtype: :class:`torch.Tensor` - """ - return torch_cluster.knn_graph(x, k, batch, loop, flow, cosine, - num_workers)
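A quick sanity sketch relating the two helpers above (`torch-cluster` required; toy coordinates):

```
import torch
from torch_geometric.nn import knn, knn_graph

x = torch.tensor([[-1., -1.], [-1., 1.], [1., -1.], [1., 1.]])

# Self-query: each point gets its single nearest other point.
edge_index = knn_graph(x, k=1, loop=False)
assert edge_index.size() == (2, 4)

# knn against itself with k=2 finds the self-match plus that neighbor,
# so knn_graph is essentially knn(x, x, ...) with self-loops dropped.
assign_index = knn(x, x, k=2)
```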
- - - def radius(x: Tensor, y: Tensor, r: float, batch_x: OptTensor = None, - batch_y: OptTensor = None, max_num_neighbors: int = 32, - num_workers: int = 1) -> Tensor: - r"""Finds for each element in :obj:`y` all points in :obj:`x` within - distance :obj:`r`. - - .. code-block:: python - - import torch - from torch_geometric.nn import radius - - x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]]) - batch_x = torch.tensor([0, 0, 0, 0]) - y = torch.Tensor([[-1, 0], [1, 0]]) - batch_y = torch.tensor([0, 0]) - assign_index = radius(x, y, 1.5, batch_x, batch_y) - - Args: - x (torch.Tensor): Node feature matrix - :math:`\mathbf{X} \in \mathbb{R}^{N \times F}`. - y (torch.Tensor): Node feature matrix - :math:`\mathbf{Y} \in \mathbb{R}^{M \times F}`. - r (float): The radius. - batch_x (torch.Tensor, optional): Batch vector - :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each - node to a specific example. (default: :obj:`None`) - batch_y (torch.Tensor, optional): Batch vector - :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^M`, which assigns each - node to a specific example. (default: :obj:`None`) - max_num_neighbors (int, optional): The maximum number of neighbors to - return for each element in :obj:`y`. (default: :obj:`32`) - num_workers (int, optional): Number of workers to use for computation. - Has no effect in case :obj:`batch_x` or :obj:`batch_y` is not - :obj:`None`, or the input lies on the GPU. (default: :obj:`1`) - - :rtype: :class:`torch.Tensor` - """ - return torch_cluster.radius(x, y, r, batch_x, batch_y, max_num_neighbors, - num_workers) - - - def radius_graph(x: Tensor, r: float, batch: OptTensor = None, - loop: bool = False, max_num_neighbors: int = 32, - flow: str = 'source_to_target', - num_workers: int = 1) -> Tensor: - r"""Computes graph edges to all points within a given distance. - - .. code-block:: python - - import torch - from torch_geometric.nn import radius_graph - - x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]]) - batch = torch.tensor([0, 0, 0, 0]) - edge_index = radius_graph(x, r=1.5, batch=batch, loop=False) - - Args: - x (torch.Tensor): Node feature matrix - :math:`\mathbf{X} \in \mathbb{R}^{N \times F}`. - r (float): The radius. - batch (torch.Tensor, optional): Batch vector - :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each - node to a specific example. (default: :obj:`None`) - loop (bool, optional): If :obj:`True`, the graph will contain - self-loops. (default: :obj:`False`) - max_num_neighbors (int, optional): The maximum number of neighbors to - return for each element in :obj:`x`. (default: :obj:`32`) - flow (str, optional): The flow direction when using in combination with - message passing (:obj:`"source_to_target"` or - :obj:`"target_to_source"`). (default: :obj:`"source_to_target"`) - num_workers (int, optional): Number of workers to use for computation. - Has no effect in case :obj:`batch` is not :obj:`None`, or the input - lies on the GPU. (default: :obj:`1`) - - :rtype: :class:`torch.Tensor` - """ - return torch_cluster.radius_graph(x, r, batch, loop, max_num_neighbors, - flow, num_workers)
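A common pattern built on `radius` above is bipartite feature averaging: gather all source points within `r` of each query point and aggregate their features. A hedged sketch (toy data; `scatter` from `torch_geometric.utils` as in PyG 2.3.x):

```
import torch
from torch_geometric.nn import radius
from torch_geometric.utils import scatter

x = torch.tensor([[-1., -1.], [-1., 1.], [1., -1.], [1., 1.]])
feat = torch.randn(4, 8)                 # features living on x
y = torch.tensor([[-1., 0.], [1., 0.]])  # query points

row, col = radius(x, y, r=1.5)           # row indexes y, col indexes x
y_feat = scatter(feat[col], row, dim=0, dim_size=y.size(0), reduce='mean')
assert y_feat.size() == (2, 8)
```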
- - - def nearest(x: Tensor, y: Tensor, batch_x: OptTensor = None, - batch_y: OptTensor = None) -> Tensor: - r"""Finds for each element in :obj:`y` the nearest point in - :obj:`x`. - - .. code-block:: python - - import torch - from torch_geometric.nn import nearest - - x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]]) - batch_x = torch.tensor([0, 0, 0, 0]) - y = torch.Tensor([[-1, 0], [1, 0]]) - batch_y = torch.tensor([0, 0]) - cluster = nearest(x, y, batch_x, batch_y) - - Args: - x (torch.Tensor): Node feature matrix - :math:`\mathbf{X} \in \mathbb{R}^{N \times F}`. - y (torch.Tensor): Node feature matrix - :math:`\mathbf{Y} \in \mathbb{R}^{M \times F}`. - batch_x (torch.Tensor, optional): Batch vector - :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each - node to a specific example. (default: :obj:`None`) - batch_y (torch.Tensor, optional): Batch vector - :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^M`, which assigns each - node to a specific example. (default: :obj:`None`) - - :rtype: :class:`torch.Tensor` - """ - return torch_cluster.nearest(x, y, batch_x, batch_y) - - - __all__ = [ - 'global_add_pool', - 'global_mean_pool', - 'global_max_pool', - 'TopKPooling', - 'SAGPooling', - 'EdgePooling', - 'ASAPooling', - 'PANPooling', - 'MemPooling', - 'max_pool', - 'avg_pool', - 'max_pool_x', - 'max_pool_neighbor_x', - 'avg_pool_x', - 'avg_pool_neighbor_x', - 'graclus', - 'voxel_grid', - 'fps', - 'knn', - 'knn_graph', - 'radius', - 'radius_graph', - 'nearest', -] - -classes = __all__ diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/pool/base.py b/pytorch_geometric-2.3.1/torch_geometric/nn/pool/base.py deleted file mode 100644 index 1037280..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/pool/base.py +++ /dev/null @@ -1,94 +0,0 @@ -from dataclasses import dataclass -from typing import Optional - -import torch -from torch import Tensor - -from torch_geometric.nn.aggr import Aggregation -from torch_geometric.nn.pool.connect import Connect -from torch_geometric.nn.pool.select import Select -from torch_geometric.utils.mixin import CastMixin - - -@dataclass -class PoolingOutput(CastMixin): - r"""The pooling output of a :class:`torch_geometric.nn.pool.Pooling` - module. - - Args: - x (torch.Tensor): The pooled node features. - edge_index (torch.Tensor): The coarsened edge indices. - edge_attr (torch.Tensor, optional): The edge features of the coarsened - graph. (default: :obj:`None`) - batch (torch.Tensor, optional): The batch vector of the pooled nodes. - """ - x: Tensor - edge_index: Tensor - edge_attr: Optional[Tensor] = None - batch: Optional[Tensor] = None - - -class Pooling(torch.nn.Module): - r"""A base class for pooling layers based on the - `"Understanding Pooling in Graph Neural Networks" - `_ paper. - - :class:`Pooling` decomposes a pooling layer into three components: - - #. :class:`Select` defines how input nodes map to supernodes. - - #. :class:`Reduce` defines how input node features are aggregated. - - #. :class:`Connect` decides how the supernodes are connected to each other. - - Args: - select (Select): The node selection operator. - reduce (Aggregation): The node feature aggregation operator. - connect (Connect): The edge connection operator. - """
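To make the three-part contract concrete, here is a hypothetical `Select`/`Connect` pair that merges consecutive node pairs; the classes are illustrative stand-ins, not layers shipped by PyG, and the import paths assume the `Pooling` base shown here plus `MeanAggregation` from `torch_geometric.nn.aggr`:

```
import torch
from torch_geometric.nn.aggr import MeanAggregation
from torch_geometric.nn.pool.base import Pooling

class PairSelect(torch.nn.Module):
    def forward(self, x, edge_index, edge_attr=None, batch=None):
        cluster = torch.arange(x.size(0), device=x.device) // 2
        return cluster, int(cluster.max()) + 1

class CoalesceConnect(torch.nn.Module):
    def forward(self, cluster, edge_index, edge_attr=None, batch=None):
        edge_index = cluster[edge_index]       # relabel both endpoints
        mask = edge_index[0] != edge_index[1]  # drop collapsed self-loops
        return edge_index[:, mask], None

pool = Pooling(PairSelect(), MeanAggregation(), CoalesceConnect())
out = pool(torch.randn(4, 8), torch.tensor([[0, 1, 2, 3], [1, 2, 3, 0]]))
assert out.x.size() == (2, 8)
```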
- def __init__(self, select: Select, reduce: Aggregation, connect: Connect): - super().__init__() - self.select = select - self.reduce = reduce - self.connect = connect - - def reset_parameters(self): - r"""Resets all learnable parameters of the module.""" - self.select.reset_parameters() - self.reduce.reset_parameters() - self.connect.reset_parameters() - - def forward( - self, - x: torch.Tensor, - edge_index: torch.Tensor, - edge_attr: Optional[torch.Tensor] = None, - batch: Optional[torch.Tensor] = None, - ) -> PoolingOutput: - r""" - Args: - x (torch.Tensor): The input node features. - edge_index (torch.Tensor): The edge indices. - edge_attr (torch.Tensor, optional): The edge features. - (default: :obj:`None`) - batch (torch.Tensor, optional): The batch vector - :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns - each node to a specific graph. (default: :obj:`None`) - """ - cluster, num_clusters = self.select(x, edge_index, edge_attr, batch) - x = self.reduce(x, cluster, dim_size=num_clusters) - edge_index, edge_attr = self.connect(cluster, edge_index, edge_attr, - batch) - - if batch is not None: - batch = (torch.arange(num_clusters, device=x.device)).scatter_( - 0, cluster, batch) - - return PoolingOutput(x, edge_index, edge_attr, batch) - - def __repr__(self) -> str: - return (f'{self.__class__.__name__}(\n' - f' select={self.select},\n' - f' reduce={self.reduce},\n' - f' connect={self.connect},\n' - f')') diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/pool/connect/__init__.py b/pytorch_geometric-2.3.1/torch_geometric/nn/pool/connect/__init__.py deleted file mode 100644 index aabde59..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/pool/connect/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .base import Connect - -__all__ = [ - 'Connect', -] diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/pool/connect/base.py b/pytorch_geometric-2.3.1/torch_geometric/nn/pool/connect/base.py deleted file mode 100644 index 36c0242..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/pool/connect/base.py +++ /dev/null @@ -1,39 +0,0 @@ -from typing import Optional, Tuple - -import torch -from torch import Tensor - - -class Connect(torch.nn.Module): - r"""An abstract base class implementing custom edge connection operators. - - Specifically, :class:`Connect` determines for each pair of supernodes the - presence or absence of an edge based on the existing edges between the - nodes in the two supernodes. - The operator also computes new coarsened edge features (if present). - """ - def reset_parameters(self): - r"""Resets all learnable parameters of the module.""" - pass - - def forward( - self, - cluster: Tensor, - edge_index: Tensor, - edge_attr: Optional[Tensor] = None, - batch: Optional[Tensor] = None, - ) -> Tuple[Tensor, Optional[Tensor]]: - r""" - Args: - cluster (torch.Tensor): The mapping from nodes to supernodes. - edge_index (torch.Tensor): The edge indices. - edge_attr (torch.Tensor, optional): The edge features. - (default: :obj:`None`) - batch (torch.Tensor, optional): The batch vector - :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns - each node to a specific graph. (default: :obj:`None`) - """ - raise NotImplementedError - - def __repr__(self) -> str: - return f'{self.__class__.__name__}()'
diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/pool/mem_pool.py b/pytorch_geometric-2.3.1/torch_geometric/nn/pool/mem_pool.py deleted file mode 100644 index 446916b..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/pool/mem_pool.py +++ /dev/null @@ -1,124 +0,0 @@ -from typing import Optional, Tuple - -import torch -from torch import Tensor -from torch.nn import Conv2d, KLDivLoss, Linear, Parameter - -from torch_geometric.utils import to_dense_batch - -EPS = 1e-15 - - -class MemPooling(torch.nn.Module): - r"""Memory based pooling layer from the `"Memory-Based Graph Networks" - `_ paper, which learns a coarsened graph - representation based on soft cluster assignments - - .. math:: - S_{i,j}^{(h)} &= \frac{ - (1+{\| \mathbf{x}_i-\mathbf{k}^{(h)}_j \|}^2 / \tau)^{ - -\frac{1+\tau}{2}}}{ - \sum_{k=1}^K (1 + {\| \mathbf{x}_i-\mathbf{k}^{(h)}_k \|}^2 / \tau)^{ - -\frac{1+\tau}{2}}} - - \mathbf{S} &= \textrm{softmax}(\textrm{Conv2d} - (\Vert_{h=1}^H \mathbf{S}^{(h)})) \in \mathbb{R}^{N \times K} - - \mathbf{X}^{\prime} &= \mathbf{S}^{\top} \mathbf{X} \mathbf{W} \in - \mathbb{R}^{K \times F^{\prime}} - - Where :math:`H` denotes the number of heads, and :math:`K` denotes the - number of clusters. - - Args: - in_channels (int): Size of each input sample :math:`F`. - out_channels (int): Size of each output sample :math:`F^{\prime}`. - heads (int): The number of heads :math:`H`. - num_clusters (int): The number of clusters :math:`K` per head. - tau (float, optional): The temperature :math:`\tau`. (default: :obj:`1.`) - """ - def __init__(self, in_channels: int, out_channels: int, heads: int, - num_clusters: int, tau: float = 1.): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.heads = heads - self.num_clusters = num_clusters - self.tau = tau - - self.k = Parameter(torch.Tensor(heads, num_clusters, in_channels)) - self.conv = Conv2d(heads, 1, kernel_size=1, padding=0, bias=False) - self.lin = Linear(in_channels, out_channels, bias=False) - - self.reset_parameters() - - def reset_parameters(self): - r"""Resets all learnable parameters of the module.""" - torch.nn.init.uniform_(self.k.data, -1., 1.) - self.conv.reset_parameters() - self.lin.reset_parameters() - - @staticmethod - def kl_loss(S: Tensor) -> Tensor: - r"""The additional KL divergence-based loss - - .. math:: - P_{i,j} &= \frac{S_{i,j}^2 / \sum_{n=1}^N S_{n,j}}{\sum_{k=1}^K - S_{i,k}^2 / \sum_{n=1}^N S_{n,k}} - - \mathcal{L}_{\textrm{KL}} &= \textrm{KLDiv}(\mathbf{P} \Vert - \mathbf{S}) - """ - S_2 = S**2 - P = S_2 / S.sum(dim=1, keepdim=True) - denom = P.sum(dim=2, keepdim=True) - denom[S.sum(dim=2, keepdim=True) == 0.0] = 1.0 - P /= denom - - loss = KLDivLoss(reduction='batchmean', log_target=False) - return loss(S.clamp(EPS).log(), P.clamp(EPS)) - - def forward(self, x: Tensor, batch: Optional[Tensor] = None, - mask: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]: - r""" - Args: - x (torch.Tensor): The node feature tensor of shape - :math:`\mathbf{X} \in \mathbb{R}^{N \times F}` or - :math:`\mathbf{X} \in \mathbb{R}^{B \times N \times F}`. - batch (torch.Tensor, optional): The batch vector - :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns - each node to a specific example. - Should not be provided in case node features already have shape - :math:`\mathbf{X} \in \mathbb{R}^{B \times N \times F}`. - (default: :obj:`None`) - mask (torch.Tensor, optional): A mask matrix - :math:`\mathbf{M} \in {\{ 0, 1 \}}^{B \times N}`, which - indicates valid nodes for each graph when using - node features of shape - :math:`\mathbf{X} \in \mathbb{R}^{B \times N \times F}`. - (default: :obj:`None`) - """
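Before the implementation below, a small usage sketch (arbitrary sizes; `S` is the soft assignment consumed by `kl_loss`, and `MemPooling` is assumed importable from `torch_geometric.nn` as in PyG 2.x):

```
import torch
from torch_geometric.nn import MemPooling

pool = MemPooling(in_channels=32, out_channels=16, heads=4, num_clusters=10)

x = torch.randn(50, 32)                    # 50 nodes
batch = torch.zeros(50, dtype=torch.long)  # a single graph
out, S = pool(x, batch)                    # out: [1, 10, 16], S: [1, 50, 10]
aux_loss = MemPooling.kl_loss(S)           # auxiliary clustering objective
```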
- (default: :obj:`None`) - mask (torch.Tensor, optional): A mask matrix - :math:`\mathbf{M} \in {\{ 0, 1 \}}^{B \times N}`, which - indicates valid nodes for each graph when using - node features of shape - :math:`\mathbf{X} \in \mathbb{R}^{B \times N \times F}`. - (default: :obj:`None`) - """ - if x.dim() <= 2: - x, mask = to_dense_batch(x, batch) - elif mask is None: - mask = x.new_ones((x.size(0), x.size(1)), dtype=torch.bool) - - (B, N, _), H, K = x.size(), self.heads, self.num_clusters - - dist = torch.cdist(self.k.view(H * K, -1), x.view(B * N, -1), p=2)**2 - dist = (1. + dist / self.tau).pow(-(self.tau + 1.0) / 2.0) - - dist = dist.view(H, K, B, N).permute(2, 0, 3, 1) # [B, H, N, K] - S = dist / dist.sum(dim=-1, keepdim=True) - - S = self.conv(S).squeeze(dim=1).softmax(dim=-1) # [B, N, K] - S = S * mask.view(B, N, 1) - - x = self.lin(S.transpose(1, 2) @ x) - - return x, S - - def __repr__(self) -> str: - return (f'{self.__class__.__name__}({self.in_channels}, ' - f'{self.out_channels}, heads={self.heads}, ' - f'num_clusters={self.num_clusters})') diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/pool/select/__init__.py b/pytorch_geometric-2.3.1/torch_geometric/nn/pool/select/__init__.py deleted file mode 100644 index fe695d7..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/pool/select/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .base import Select - -__all__ = [ - 'Select', -] diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/pool/select/base.py b/pytorch_geometric-2.3.1/torch_geometric/nn/pool/select/base.py deleted file mode 100644 index c23293c..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/pool/select/base.py +++ /dev/null @@ -1,39 +0,0 @@ -from typing import Optional, Tuple - -import torch -from torch import Tensor - - -class Select(torch.nn.Module): - r"""An abstract base class implementing custom node selections that map the - nodes of an input graph to supernodes of the pooled one. - - Specifically, :class:`Select` returns a mapping - :math:`\mathbf{c} \in {\{ -1, \ldots, C - 1\}}^N`, which assigns each node - to one of :math:`C` super nodes. - In addition, :class:`Select` returns the number of super nodes. - """ - def reset_parameters(self): - pass - - def forward( - self, - x: Tensor, - edge_index: Tensor, - edge_attr: Optional[Tensor] = None, - batch: Optional[Tensor] = None, - ) -> Tuple[Tensor, int]: - r""" - Args: - x (torch.Tensor): The input node features. - edge_index (torch.Tensor): The edge indices. - edge_attr (torch.Tensor, optional): The edge features. - (default: :obj:`None`) - batch (torch.Tensor, optional): The batch vector - :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns - each node to a specific graph. 
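To make the `Select` contract concrete, here is a deliberately trivial implementation (hypothetical, for illustration only) that assigns nodes to supernodes at random; real implementations derive `cluster` from features or topology:

```python
import torch
from typing import Optional, Tuple
from torch import Tensor

class RandomSelect(torch.nn.Module):  # hypothetical example mirroring `Select`
    def __init__(self, num_clusters: int):
        super().__init__()
        self.num_clusters = num_clusters

    def forward(self, x: Tensor, edge_index: Tensor,
                edge_attr: Optional[Tensor] = None,
                batch: Optional[Tensor] = None) -> Tuple[Tensor, int]:
        # Map each of the N nodes to one of C supernodes:
        cluster = torch.randint(self.num_clusters, (x.size(0), ),
                                device=x.device)
        return cluster, self.num_clusters
```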
(default: :obj:`None`) - """ - raise NotImplementedError - - def __repr__(self) -> str: - return f'{self.__class__.__name__}()' diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/pool/topk_pool.py b/pytorch_geometric-2.3.1/torch_geometric/nn/pool/topk_pool.py deleted file mode 100644 index 9984728..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/pool/topk_pool.py +++ /dev/null @@ -1,227 +0,0 @@ -from typing import Callable, Optional, Tuple, Union - -import torch -from torch import Tensor -from torch.nn import Parameter - -from torch_geometric.nn.inits import uniform -from torch_geometric.utils import scatter, softmax -from torch_geometric.utils.num_nodes import maybe_num_nodes - - -def topk( - x: Tensor, - ratio: Optional[Union[float, int]], - batch: Tensor, - min_score: Optional[float] = None, - tol: float = 1e-7, -) -> Tensor: - if min_score is not None: - # Make sure that we do not drop all nodes in a graph. - scores_max = scatter(x, batch, reduce='max')[batch] - tol - scores_min = scores_max.clamp(max=min_score) - - perm = (x > scores_min).nonzero().view(-1) - - elif ratio is not None: - num_nodes = scatter(batch.new_ones(x.size(0)), batch, reduce='sum') - batch_size, max_num_nodes = num_nodes.size(0), int(num_nodes.max()) - - cum_num_nodes = torch.cat( - [num_nodes.new_zeros(1), - num_nodes.cumsum(dim=0)[:-1]], dim=0) - - index = torch.arange(batch.size(0), dtype=torch.long, device=x.device) - index = (index - cum_num_nodes[batch]) + (batch * max_num_nodes) - - dense_x = x.new_full((batch_size * max_num_nodes, ), -60000.0) - dense_x[index] = x - dense_x = dense_x.view(batch_size, max_num_nodes) - - _, perm = dense_x.sort(dim=-1, descending=True) - - perm = perm + cum_num_nodes.view(-1, 1) - perm = perm.view(-1) - - if ratio >= 1: - k = num_nodes.new_full((num_nodes.size(0), ), int(ratio)) - k = torch.min(k, num_nodes) - else: - k = (float(ratio) * num_nodes.to(x.dtype)).ceil().to(torch.long) - - if isinstance(ratio, int) and (k == ratio).all(): - # If all graphs have exactly `ratio` or more than `ratio` entries, - # we can just pick the first entries in `perm` batch-wise: - index = torch.arange(batch_size, device=x.device) * max_num_nodes - index = index.view(-1, 1).repeat(1, ratio).view(-1) - index += torch.arange(ratio, device=x.device).repeat(batch_size) - else: - # Otherwise, compute indices per graph: - index = torch.cat([ - torch.arange(k[i], device=x.device) + i * max_num_nodes - for i in range(batch_size) - ], dim=0) - - perm = perm[index] - - else: - raise ValueError("At least one of 'min_score' and 'ratio' parameters " - "must be specified") - - return perm - - -def filter_adj( - edge_index: Tensor, - edge_attr: Optional[Tensor], - perm: Tensor, - num_nodes: Optional[int] = None, -) -> Tuple[Tensor, Optional[Tensor]]: - num_nodes = maybe_num_nodes(edge_index, num_nodes) - - mask = perm.new_full((num_nodes, ), -1) - i = torch.arange(perm.size(0), dtype=torch.long, device=perm.device) - mask[perm] = i - - row, col = edge_index[0], edge_index[1] - row, col = mask[row], mask[col] - mask = (row >= 0) & (col >= 0) - row, col = row[mask], col[mask] - - if edge_attr is not None: - edge_attr = edge_attr[mask] - - return torch.stack([row, col], dim=0), edge_attr - - -class TopKPooling(torch.nn.Module): - r""":math:`\mathrm{top}_k` pooling operator from the `"Graph U-Nets" - `_, `"Towards Sparse - Hierarchical Graph Classifiers" `_ - and `"Understanding Attention and Generalization in Graph Neural - Networks" `_ papers. 
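A small worked example of the two helpers above (assuming `topk` and `filter_adj` as defined in this file; the values were chosen by hand):

```python
import torch

x = torch.tensor([0.9, 0.1, 0.5, 0.7, 0.2])  # node scores
batch = torch.tensor([0, 0, 0, 1, 1])        # two graphs: 3 + 2 nodes

perm = topk(x, ratio=0.5, batch=batch)
# k = ceil(0.5 * N) per graph -> 2 nodes from graph 0, 1 from graph 1,
# so perm == tensor([0, 2, 3])

edge_index = torch.tensor([[0, 1, 3], [2, 2, 4]])
edge_index, _ = filter_adj(edge_index, None, perm, num_nodes=5)
# Only the edge (0 -> 2) survives, re-indexed to the kept nodes: [[0], [1]]
```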
-
-    If :obj:`min_score` :math:`\tilde{\alpha}` is :obj:`None`, computes:
-
-        .. math::
-            \mathbf{y} &= \frac{\mathbf{X}\mathbf{p}}{\| \mathbf{p} \|}
-
-            \mathbf{i} &= \mathrm{top}_k(\mathbf{y})
-
-            \mathbf{X}^{\prime} &= (\mathbf{X} \odot
-            \mathrm{tanh}(\mathbf{y}))_{\mathbf{i}}
-
-            \mathbf{A}^{\prime} &= \mathbf{A}_{\mathbf{i},\mathbf{i}}
-
-    If :obj:`min_score` :math:`\tilde{\alpha}` is a value in :obj:`[0, 1]`,
-    computes:
-
-        .. math::
-            \mathbf{y} &= \mathrm{softmax}(\mathbf{X}\mathbf{p})
-
-            \mathbf{i} &= \mathbf{y}_i > \tilde{\alpha}
-
-            \mathbf{X}^{\prime} &= (\mathbf{X} \odot \mathbf{y})_{\mathbf{i}}
-
-            \mathbf{A}^{\prime} &= \mathbf{A}_{\mathbf{i},\mathbf{i}},
-
-    where nodes are dropped based on a learnable projection score
-    :math:`\mathbf{p}`.
-
-    Args:
-        in_channels (int): Size of each input sample.
-        ratio (float or int): Graph pooling ratio, which is used to compute
-            :math:`k = \lceil \mathrm{ratio} \cdot N \rceil`, or the value
-            of :math:`k` itself, depending on whether the type of :obj:`ratio`
-            is :obj:`float` or :obj:`int`.
-            This value is ignored if :obj:`min_score` is not :obj:`None`.
-            (default: :obj:`0.5`)
-        min_score (float, optional): Minimal node score :math:`\tilde{\alpha}`
-            which is used to compute indices of pooled nodes
-            :math:`\mathbf{i} = \mathbf{y}_i > \tilde{\alpha}`.
-            When this value is not :obj:`None`, the :obj:`ratio` argument is
-            ignored. (default: :obj:`None`)
-        multiplier (float, optional): Coefficient by which features get
-            multiplied after pooling. This can be useful for large graphs and
-            when :obj:`min_score` is used. (default: :obj:`1`)
-        nonlinearity (str or callable, optional): The non-linearity to use.
-            (default: :obj:`"tanh"`)
-    """
-    def __init__(
-        self,
-        in_channels: int,
-        ratio: Union[int, float] = 0.5,
-        min_score: Optional[float] = None,
-        multiplier: float = 1.,
-        nonlinearity: Union[str, Callable] = 'tanh',
-    ):
-        super().__init__()
-
-        if isinstance(nonlinearity, str):
-            nonlinearity = getattr(torch, nonlinearity)
-
-        self.in_channels = in_channels
-        self.ratio = ratio
-        self.min_score = min_score
-        self.multiplier = multiplier
-        self.nonlinearity = nonlinearity
-
-        self.weight = Parameter(torch.Tensor(1, in_channels))
-
-        self.reset_parameters()
-
-    def reset_parameters(self):
-        r"""Resets all learnable parameters of the module."""
-        uniform(self.in_channels, self.weight)
-
-    def forward(
-        self,
-        x: Tensor,
-        edge_index: Tensor,
-        edge_attr: Optional[Tensor] = None,
-        batch: Optional[Tensor] = None,
-        attn: Optional[Tensor] = None,
-    ) -> Tuple[Tensor, Tensor, Optional[Tensor], Tensor, Tensor, Tensor]:
-        r"""
-        Args:
-            x (torch.Tensor): The node feature matrix.
-            edge_index (torch.Tensor): The edge indices.
-            edge_attr (torch.Tensor, optional): The edge features.
-                (default: :obj:`None`)
-            batch (torch.Tensor, optional): The batch vector
-                :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns
-                each node to a specific example. (default: :obj:`None`)
-            attn (torch.Tensor, optional): Optional node-level matrix to use
-                for computing attention scores instead of using the node
-                feature matrix :obj:`x`.
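A minimal end-to-end sketch of the layer (random inputs, a single graph):

```python
import torch
from torch_geometric.nn import TopKPooling

pool = TopKPooling(in_channels=16, ratio=0.5)
x = torch.randn(6, 16)
edge_index = torch.randint(6, (2, 12))

x, edge_index, edge_attr, batch, perm, score = pool(x, edge_index)
# ceil(0.5 * 6) = 3 nodes survive; their features are scaled by the
# tanh-transformed projection scores, as in the math above.
```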
(default: :obj:`None`) - """ - if batch is None: - batch = edge_index.new_zeros(x.size(0)) - - attn = x if attn is None else attn - attn = attn.unsqueeze(-1) if attn.dim() == 1 else attn - score = (attn * self.weight).sum(dim=-1) - - if self.min_score is None: - score = self.nonlinearity(score / self.weight.norm(p=2, dim=-1)) - else: - score = softmax(score, batch) - - perm = topk(score, self.ratio, batch, self.min_score) - x = x[perm] * score[perm].view(-1, 1) - x = self.multiplier * x if self.multiplier != 1 else x - - batch = batch[perm] - edge_index, edge_attr = filter_adj(edge_index, edge_attr, perm, - num_nodes=score.size(0)) - - return x, edge_index, edge_attr, batch, perm, score[perm] - - def __repr__(self) -> str: - if self.min_score is None: - ratio = f'ratio={self.ratio}' - else: - ratio = f'min_score={self.min_score}' - - return (f'{self.__class__.__name__}({self.in_channels}, {ratio}, ' - f'multiplier={self.multiplier})') diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/pool/voxel_grid.py b/pytorch_geometric-2.3.1/torch_geometric/nn/pool/voxel_grid.py deleted file mode 100644 index 5df0539..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/pool/voxel_grid.py +++ /dev/null @@ -1,69 +0,0 @@ -from typing import List, Optional, Union - -import torch -from torch import Tensor - -from torch_geometric.utils.repeat import repeat - -try: - from torch_cluster import grid_cluster -except ImportError: - grid_cluster = None - - -def voxel_grid( - pos: Tensor, - size: Union[float, List[float], Tensor], - batch: Optional[Tensor] = None, - start: Optional[Union[float, List[float], Tensor]] = None, - end: Optional[Union[float, List[float], Tensor]] = None, -) -> Tensor: - r"""Voxel grid pooling from the, *e.g.*, `Dynamic Edge-Conditioned Filters - in Convolutional Networks on Graphs `_ - paper, which overlays a regular grid of user-defined size over a point - cloud and clusters all points within the same voxel. - - Args: - pos (torch.Tensor): Node position matrix - :math:`\mathbf{X} \in \mathbb{R}^{(N_1 + \ldots + N_B) \times D}`. - size (float or [float] or Tensor): Size of a voxel (in each dimension). - batch (torch.Tensor, optional): Batch vector - :math:`\mathbf{b} \in {\{ 0, \ldots,B-1\}}^N`, which assigns each - node to a specific example. (default: :obj:`None`) - start (float or [float] or Tensor, optional): Start coordinates of the - grid (in each dimension). If set to :obj:`None`, will be set to the - minimum coordinates found in :attr:`pos`. (default: :obj:`None`) - end (float or [float] or Tensor, optional): End coordinates of the grid - (in each dimension). If set to :obj:`None`, will be set to the - maximum coordinates found in :attr:`pos`. 
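A short usage sketch (requires `torch-cluster` to be installed; the exact integer cluster ids are an implementation detail of `grid_cluster`):

```python
import torch
from torch_geometric.nn import voxel_grid

pos = torch.tensor([[0.0, 0.0], [0.2, 0.3], [1.5, 1.7], [1.6, 1.8]])
cluster = voxel_grid(pos, size=1.0)
# The first two points fall into one unit voxel and share a cluster id;
# the last two points share another.
```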
(default: :obj:`None`) - - :rtype: :class:`torch.Tensor` - """ - - if grid_cluster is None: - raise ImportError('`voxel_grid` requires `torch-cluster`.') - - pos = pos.unsqueeze(-1) if pos.dim() == 1 else pos - num_nodes, dim = pos.size() - - size = size.tolist() if torch.is_tensor(size) else size - start = start.tolist() if torch.is_tensor(start) else start - end = end.tolist() if torch.is_tensor(end) else end - - size, start, end = repeat(size, dim), repeat(start, dim), repeat(end, dim) - - if batch is None: - batch = torch.zeros(pos.shape[0], dtype=torch.long) - - pos = torch.cat([pos, batch.unsqueeze(-1).type_as(pos)], dim=-1) - size = size + [1] - start = None if start is None else start + [0] - end = None if end is None else end + [batch.max().item()] - - size = torch.tensor(size, dtype=pos.dtype, device=pos.device) - if start is not None: - start = torch.tensor(start, dtype=pos.dtype, device=pos.device) - if end is not None: - end = torch.tensor(end, dtype=pos.dtype, device=pos.device) - - return grid_cluster(pos, size, start, end) diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/resolver.py b/pytorch_geometric-2.3.1/torch_geometric/nn/resolver.py deleted file mode 100644 index ce6a949..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/resolver.py +++ /dev/null @@ -1,158 +0,0 @@ -import inspect -from typing import Any, Optional, Union - -import torch -from torch import Tensor -from torch.optim import Optimizer -from torch.optim.lr_scheduler import ReduceLROnPlateau - -from torch_geometric.nn.lr_scheduler import ( - ConstantWithWarmupLR, - CosineWithWarmupLR, - CosineWithWarmupRestartsLR, - LinearWithWarmupLR, - PolynomialWithWarmupLR, -) -from torch_geometric.resolver import normalize_string, resolver - -try: - from torch.optim.lr_scheduler import LRScheduler -except ImportError: # PyTorch < 2.0 - from torch.optim.lr_scheduler import _LRScheduler as LRScheduler - -# Activation Resolver ######################################################### - - -def swish(x: Tensor) -> Tensor: - return x * x.sigmoid() - - -def activation_resolver(query: Union[Any, str] = 'relu', *args, **kwargs): - import torch - base_cls = torch.nn.Module - base_cls_repr = 'Act' - acts = [ - act for act in vars(torch.nn.modules.activation).values() - if isinstance(act, type) and issubclass(act, base_cls) - ] - acts += [ - swish, - ] - act_dict = {} - return resolver(acts, act_dict, query, base_cls, base_cls_repr, *args, - **kwargs) - - -# Normalization Resolver ###################################################### - - -def normalization_resolver(query: Union[Any, str], *args, **kwargs): - import torch - - import torch_geometric.nn.norm as norm - base_cls = torch.nn.Module - base_cls_repr = 'Norm' - norms = [ - norm for norm in vars(norm).values() - if isinstance(norm, type) and issubclass(norm, base_cls) - ] - norm_dict = {} - return resolver(norms, norm_dict, query, base_cls, base_cls_repr, *args, - **kwargs) - - -# Aggregation Resolver ######################################################## - - -def aggregation_resolver(query: Union[Any, str], *args, **kwargs): - import torch_geometric.nn.aggr as aggr - base_cls = aggr.Aggregation - aggrs = [ - aggr for aggr in vars(aggr).values() - if isinstance(aggr, type) and issubclass(aggr, base_cls) - ] - aggr_dict = { - 'add': aggr.SumAggregation, - } - return resolver(aggrs, aggr_dict, query, base_cls, None, *args, **kwargs) - - -# Learning Rate Scheduler Resolver ############################################ - - -def lr_scheduler_resolver( - query: 
Union[Any, str], - optimizer: Optimizer, - warmup_ratio_or_steps: Optional[Union[float, int]] = 0.1, - num_training_steps: Optional[int] = None, - **kwargs, -) -> Union[LRScheduler, ReduceLROnPlateau]: - r"""A resolver to obtain a learning rate scheduler implemented in either - PyG or PyTorch from its name or type. - - Args: - query (Any or str): The query name of the learning rate scheduler. - optimizer (Optimizer): The optimizer to be scheduled. - warmup_ratio_or_steps (float or int, optional): The number of warmup - steps. If given as a `float`, it will act as a ratio that gets - multiplied with the number of training steps to obtain the number - of warmup steps. Only required for warmup-based LR schedulers. - (default: :obj:`0.1`) - num_training_steps (int, optional): The total number of training steps. - (default: :obj:`None`) - **kwargs (optional): Additional arguments of the LR scheduler. - """ - if not isinstance(query, str): - return query - - if isinstance(warmup_ratio_or_steps, float): - if warmup_ratio_or_steps < 0 or warmup_ratio_or_steps > 1: - raise ValueError(f"`warmup_ratio_or_steps` needs to be between " - f"0.0 and 1.0 when given as a floating point " - f"number (got {warmup_ratio_or_steps}).") - if num_training_steps is not None: - warmup_steps = round(warmup_ratio_or_steps * num_training_steps) - elif isinstance(warmup_ratio_or_steps, int): - if warmup_ratio_or_steps < 0: - raise ValueError(f"`warmup_ratio_or_steps` needs to be positive " - f"when given as an integer " - f"(got {warmup_ratio_or_steps}).") - warmup_steps = warmup_ratio_or_steps - else: - raise ValueError(f"Found invalid type of `warmup_ratio_or_steps` " - f"(got {type(warmup_ratio_or_steps)})") - - base_cls = LRScheduler - classes = [ - scheduler for scheduler in vars(torch.optim.lr_scheduler).values() - if isinstance(scheduler, type) and issubclass(scheduler, base_cls) - ] + [ReduceLROnPlateau] - - customized_lr_schedulers = [ - ConstantWithWarmupLR, - LinearWithWarmupLR, - CosineWithWarmupLR, - CosineWithWarmupRestartsLR, - PolynomialWithWarmupLR, - ] - classes += customized_lr_schedulers - - query_repr = normalize_string(query) - base_cls_repr = normalize_string('LR') - - for cls in classes: - cls_repr = normalize_string(cls.__name__) - if query_repr in [cls_repr, cls_repr.replace(base_cls_repr, '')]: - if inspect.isclass(cls): - if cls in customized_lr_schedulers: - cls_keys = inspect.signature(cls).parameters.keys() - if 'num_warmup_steps' in cls_keys: - kwargs['num_warmup_steps'] = warmup_steps - if 'num_training_steps' in cls_keys: - kwargs['num_training_steps'] = num_training_steps - obj = cls(optimizer, **kwargs) - return obj - return cls - - choices = set(cls.__name__ for cls in classes) - raise ValueError(f"Could not resolve '{query}' among choices {choices}") diff --git a/pytorch_geometric-2.3.1/torch_geometric/nn/summary.py b/pytorch_geometric-2.3.1/torch_geometric/nn/summary.py deleted file mode 100644 index f37dd73..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/nn/summary.py +++ /dev/null @@ -1,166 +0,0 @@ -from collections import defaultdict -from typing import Any, List, Optional, Union - -import torch -from torch.jit import ScriptModule -from torch.nn import Module - -from torch_geometric.nn.conv import MessagePassing -from torch_geometric.typing import SparseTensor - - -def summary( - model: torch.nn.Module, - *args, - max_depth: int = 3, - leaf_module: Optional[Union[Module, List[Module]]] = 'MessagePassing', - **kwargs, -) -> str: - r"""Summarizes a given 
:class:`torch.nn.Module`. - The summarized information includes (1) layer names, (2) input and output - shapes, and (3) the number of parameters. - - .. code-block:: python - - import torch - from torch_geometric.nn import GCN, summary - - model = GCN(128, 64, num_layers=2, out_channels=32) - x = torch.randn(100, 128) - edge_index = torch.randint(100, size=(2, 20)) - - print(summary(model, x, edge_index)) - - .. code-block:: - - +---------------------+---------------------+--------------+--------+ - | Layer | Input Shape | Output Shape | #Param | - |---------------------+---------------------+--------------+--------| - | GCN | [100, 128], [2, 20] | [100, 32] | 10,336 | - | ├─(act)ReLU | [100, 64] | [100, 64] | -- | - | ├─(convs)ModuleList | -- | -- | 10,336 | - | │ └─(0)GCNConv | [100, 128], [2, 20] | [100, 64] | 8,256 | - | │ └─(1)GCNConv | [100, 64], [2, 20] | [100, 32] | 2,080 | - +---------------------+---------------------+--------------+--------+ - - Args: - model (torch.nn.Module): The model to summarize. - *args: The arguments of the :obj:`model`. - max_depth (int, optional): The depth of nested layers to display. - Any layers deeper than this depth will not be displayed in the - summary. (default: :obj:`3`) - leaf_module (torch.nn.Module or [torch.nn.Module], optional): The - modules to be treated as leaf modules, whose submodules are - excluded from the summary. - (default: :class:`~torch_geometric.nn.conv.MessagePassing`) - **kwargs: Additional arguments of the :obj:`model`. - """ - # NOTE This is just for the doc-string to render nicely: - if leaf_module == 'MessagePassing': - leaf_module = MessagePassing - - def register_hook(info): - def hook(module, inputs, output): - info['input_shape'].append(get_shape(inputs)) - info['output_shape'].append(get_shape(output)) - - return hook - - hooks = {} - depth = 0 - stack = [(model.__class__.__name__, model, depth)] - - info_list = [] - input_shape = defaultdict(list) - output_shape = defaultdict(list) - while stack: - name, module, depth = stack.pop() - module_id = id(module) - - if name.startswith('(_'): # Do not summarize private modules. - continue - - if module_id in hooks: # Avoid duplicated hooks. - hooks[module_id].remove() - - info = {} - info['name'] = name - info['input_shape'] = input_shape[module_id] - info['output_shape'] = output_shape[module_id] - info['depth'] = depth - num_params = sum(p.numel() for p in module.parameters()) - info['#param'] = f'{num_params:,}' if num_params > 0 else '--' - info_list.append(info) - - if not isinstance(module, ScriptModule): - hooks[module_id] = module.register_forward_hook( - register_hook(info)) - - if depth >= max_depth: - continue - - if (leaf_module is not None and isinstance(module, leaf_module)): - continue - - module_items = reversed(module._modules.items()) - stack += [(f"({name}){mod.__class__.__name__}", mod, depth + 1) - for name, mod in module_items if mod is not None] - - training = model.training - model.eval() - - with torch.no_grad(): - model(*args, **kwargs) - - model.train(training) - - for h in hooks.values(): # Remove hooks. 
-        h.remove()
-
-    info_list = postprocess(info_list)
-    return make_table(info_list, max_depth=max_depth)
-
-
-def get_shape(inputs: Any) -> str:
-    if not isinstance(inputs, (tuple, list)):
-        inputs = (inputs, )
-
-    out = []
-    for x in inputs:
-        if isinstance(x, SparseTensor):
-            out.append(str(list(x.sizes())))
-        elif hasattr(x, 'size'):
-            out.append(str(list(x.size())))
-    return ', '.join(out)
-
-
-def postprocess(info_list: List[dict]) -> List[dict]:
-    for idx, info in enumerate(info_list):
-        depth = info['depth']
-        if idx > 0:  # The root module (index 0) is excluded.
-            if depth == 1:
-                prefix = '├─'
-            else:
-                prefix = f"{'│ '*(depth-1)}└─"
-            info['name'] = prefix + info['name']
-
-        if info['input_shape']:
-            info['input_shape'] = info['input_shape'].pop(0)
-            info['output_shape'] = info['output_shape'].pop(0)
-        else:
-            info['input_shape'] = '--'
-            info['output_shape'] = '--'
-    return info_list
-
-
-def make_table(info_list: List[dict], max_depth: int) -> str:
-    from tabulate import tabulate
-    content = [['Layer', 'Input Shape', 'Output Shape', '#Param']]
-    for info in info_list:
-        content.append([
-            info['name'],
-            info['input_shape'],
-            info['output_shape'],
-            info['#param'],
-        ])
-    return tabulate(content, headers='firstrow', tablefmt='psql')
diff --git a/pytorch_geometric-2.3.1/torch_geometric/profile/__init__.py b/pytorch_geometric-2.3.1/torch_geometric/profile/__init__.py
deleted file mode 100644
index 3ac26b1..0000000
--- a/pytorch_geometric-2.3.1/torch_geometric/profile/__init__.py
+++ /dev/null
@@ -1,27 +0,0 @@
-from .profile import profileit, timeit, get_stats_summary
-from .profile import trace_handler, rename_profile_file, torch_profile
-from .utils import count_parameters
-from .utils import get_model_size
-from .utils import get_data_size
-from .utils import get_cpu_memory_from_gc
-from .utils import get_gpu_memory_from_gc
-from .utils import get_gpu_memory_from_nvidia_smi
-from .benchmark import benchmark
-
-__all__ = [
-    'profileit',
-    'timeit',
-    'get_stats_summary',
-    'trace_handler',
-    'rename_profile_file',
-    'torch_profile',
-    'count_parameters',
-    'get_model_size',
-    'get_data_size',
-    'get_cpu_memory_from_gc',
-    'get_gpu_memory_from_gc',
-    'get_gpu_memory_from_nvidia_smi',
-    'benchmark',
-]
-
-classes = __all__
diff --git a/pytorch_geometric-2.3.1/torch_geometric/profile/benchmark.py b/pytorch_geometric-2.3.1/torch_geometric/profile/benchmark.py
deleted file mode 100644
index f71ef9c..0000000
--- a/pytorch_geometric-2.3.1/torch_geometric/profile/benchmark.py
+++ /dev/null
@@ -1,99 +0,0 @@
-import time
-from typing import Any, Callable, List, Optional, Tuple
-
-import torch
-from torch import Tensor
-
-from torch_geometric.utils import is_torch_sparse_tensor
-
-
-def benchmark(
-    funcs: List[Callable],
-    args: Tuple[Any],
-    num_steps: int,
-    func_names: Optional[List[str]] = None,
-    num_warmups: int = 10,
-    backward: bool = False,
-):
-    r"""Benchmark a list of functions :obj:`funcs` that receive the same set
-    of arguments :obj:`args`.
-
-    Args:
-        funcs ([Callable]): The list of functions to benchmark.
-        args ((Any, )): The arguments to pass to the functions.
-        num_steps (int): The number of steps to run the benchmark.
-        func_names ([str], optional): The names of the functions. If not
-            given, will try to infer the name from the function itself.
-            (default: :obj:`None`)
-        num_warmups (int, optional): The number of warmup steps.
-            (default: :obj:`10`)
-        backward (bool, optional): If set to :obj:`True`, will benchmark both
-            forward and backward passes.
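A minimal usage sketch of `benchmark` (function names are inferred, and the backward pass is timed as well):

```python
import torch
from torch_geometric.profile import benchmark

x = torch.randn(10000, 64)
benchmark(
    funcs=[torch.nn.functional.relu, torch.sigmoid],
    args=(x, ),
    num_steps=100,
    num_warmups=10,
    backward=True,  # also time out.backward(...)
)
```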
-            (default: :obj:`False`)
-    """
-    from tabulate import tabulate
-
-    if num_steps <= 0:
-        raise ValueError(f"'num_steps' must be a positive integer "
-                         f"(got {num_steps})")
-
-    if num_warmups <= 0:
-        raise ValueError(f"'num_warmups' must be a positive integer "
-                         f"(got {num_warmups})")
-
-    if func_names is None:
-        func_names = [get_func_name(func) for func in funcs]
-
-    if len(funcs) != len(func_names):
-        raise ValueError(f"Length of 'funcs' (got {len(funcs)}) and "
-                         f"'func_names' (got {len(func_names)}) must be equal")
-
-    ts: List[List[str]] = []
-    for func, name in zip(funcs, func_names):
-        t_forward = t_backward = 0
-        for i in range(num_warmups + num_steps):
-            args = [
-                arg.detach().requires_grad_(backward)
-                if isinstance(arg, Tensor) and arg.is_floating_point()
-                and not is_torch_sparse_tensor(arg) else arg for arg in args
-            ]
-
-            if torch.cuda.is_available():
-                torch.cuda.synchronize()
-            t_start = time.perf_counter()
-
-            out = func(*args)
-
-            if torch.cuda.is_available():
-                torch.cuda.synchronize()
-            if i >= num_warmups:
-                t_forward += time.perf_counter() - t_start
-
-            if backward:
-                out_grad = torch.randn_like(out)
-                t_start = time.perf_counter()
-
-                out.backward(out_grad)
-
-                if torch.cuda.is_available():
-                    torch.cuda.synchronize()
-                if i >= num_warmups:
-                    t_backward += time.perf_counter() - t_start
-
-        ts.append([name, f'{t_forward:.4f}s'])
-        if backward:
-            ts[-1].append(f'{t_backward:.4f}s')
-            ts[-1].append(f'{t_forward + t_backward:.4f}s')
-
-    header = ['Name', 'Forward']
-    if backward:
-        header.extend(['Backward', 'Total'])
-
-    print(tabulate(ts, headers=header, tablefmt='psql'))
-
-
-def get_func_name(func: Callable) -> str:
-    if hasattr(func, '__name__'):
-        return func.__name__
-    elif hasattr(func, '__class__'):
-        return func.__class__.__name__
-    raise ValueError(f"Could not infer name for function '{func}'")
diff --git a/pytorch_geometric-2.3.1/torch_geometric/profile/profile.py b/pytorch_geometric-2.3.1/torch_geometric/profile/profile.py
deleted file mode 100644
index eec2002..0000000
--- a/pytorch_geometric-2.3.1/torch_geometric/profile/profile.py
+++ /dev/null
@@ -1,237 +0,0 @@
-import os
-import pathlib
-import time
-from contextlib import ContextDecorator, contextmanager
-from typing import Any, List, NamedTuple, Tuple
-
-import torch
-from torch.profiler import ProfilerActivity, profile
-
-from torch_geometric.profile.utils import (
-    byte_to_megabyte,
-    get_gpu_memory_from_nvidia_smi,
-)
-
-
-class Stats(NamedTuple):
-    time: float
-    max_allocated_cuda: float
-    max_reserved_cuda: float
-    max_active_cuda: float
-    nvidia_smi_free_cuda: float
-    nvidia_smi_used_cuda: float
-
-
-class StatsSummary(NamedTuple):
-    time_mean: float
-    time_std: float
-    max_allocated_cuda: float
-    max_reserved_cuda: float
-    max_active_cuda: float
-    min_nvidia_smi_free_cuda: float
-    max_nvidia_smi_used_cuda: float
-
-
-def profileit():  # pragma: no cover
-    r"""A decorator to facilitate profiling a function, *e.g.*, obtaining
-    training runtime and memory statistics of a specific model on a specific
-    dataset.
-    Returns a :obj:`Stats` object with the attributes :obj:`time`,
-    :obj:`max_allocated_cuda`, :obj:`max_reserved_cuda`,
-    :obj:`max_active_cuda`, :obj:`nvidia_smi_free_cuda`,
-    :obj:`nvidia_smi_used_cuda`.
-
-    .. code-block:: python
-
-        @profileit()
-        def train(model, optimizer, x, edge_index, y):
-            optimizer.zero_grad()
-            out = model(x, edge_index)
-            loss = criterion(out, y)
-            loss.backward()
-            optimizer.step()
-            return float(loss)
-
-        loss, stats = train(model, optimizer, x, edge_index, y)
-    """
-    def decorator(func):
-        def wrapper(*args, **kwargs) -> Tuple[Any, Stats]:
-            from pytorch_memlab import LineProfiler
-
-            model = args[0]
-            if not isinstance(model, torch.nn.Module):
-                raise AttributeError(
-                    'First argument for profiling needs to be torch.nn.Module')
-
-            device = None
-            for arg in list(args) + list(kwargs.values()):
-                if isinstance(arg, torch.Tensor):
-                    device = arg.get_device()
-                    break
-            if device is None:
-                raise AttributeError(
-                    "Could not infer CUDA device from the args in the "
-                    "function being profiled")
-            if device == -1:
-                raise RuntimeError(
-                    "The profiling decorator does not support profiling "
-                    "on non CUDA devices")
-
-            # Init `pytorch_memlab` for analyzing the model forward pass:
-            line_profiler = LineProfiler(target_gpu=device)
-            line_profiler.enable()
-            line_profiler.add_function(args[0].forward)
-
-            start = torch.cuda.Event(enable_timing=True)
-            end = torch.cuda.Event(enable_timing=True)
-            start.record()
-
-            out = func(*args, **kwargs)
-
-            end.record()
-            torch.cuda.synchronize()
-            time = start.elapsed_time(end) / 1000
-
-            # Get the global memory statistics collected by `pytorch_memlab`:
-            memlab = read_from_memlab(line_profiler)
-            max_allocated_cuda, max_reserved_cuda, max_active_cuda = memlab
-            line_profiler.disable()
-
-            # Get additional information from `nvidia-smi`:
-            free_cuda, used_cuda = get_gpu_memory_from_nvidia_smi(
-                device=device)
-
-            stats = Stats(time, max_allocated_cuda, max_reserved_cuda,
-                          max_active_cuda, free_cuda, used_cuda)
-
-            return out, stats
-
-        return wrapper
-
-    return decorator
-
-
-class timeit(ContextDecorator):
-    r"""A context decorator to facilitate timing a function, *e.g.*, obtaining
-    the runtime of a specific model on a specific dataset.
-
-    .. code-block:: python
-
-        @torch.no_grad()
-        def test(model, x, edge_index):
-            return model(x, edge_index)
-
-        with timeit() as t:
-            z = test(model, x, edge_index)
-        time = t.duration
-
-    Args:
-        log (bool, optional): If set to :obj:`False`, will not log any runtime
-            to the console. (default: :obj:`True`)
-        avg_time_divisor (int, optional): If set to a value greater than
-            :obj:`1`, will divide the total time by this value. Useful for
-            calculating the average of runtimes within a for-loop.
-            (default: :obj:`0`)
-    """
-    def __init__(self, log: bool = True, avg_time_divisor: int = 0):
-        self.log = log
-        self.avg_time_divisor = avg_time_divisor
-
-    def __enter__(self):
-        if torch.cuda.is_available():
-            torch.cuda.synchronize()
-        self.t_start = time.time()
-        return self
-
-    def __exit__(self, *args):
-        if torch.cuda.is_available():
-            torch.cuda.synchronize()
-        self.t_end = time.time()
-        self.duration = self.t_end - self.t_start
-        if self.avg_time_divisor > 1:
-            self.duration = self.duration / self.avg_time_divisor
-        if self.log:  # pragma: no cover
-            print(f'Time: {self.duration:.8f}s', flush=True)
-
-    def reset(self):
-        r"""Prints the duration and resets current timer."""
-        if self.t_start is None:
-            raise RuntimeError("Timer wasn't started.")
-        else:
-            self.__exit__()
-            self.__enter__()
-
-
-def get_stats_summary(stats_list: List[Stats]):  # pragma: no cover
-    r"""Creates a summary of collected runtime and memory statistics.
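A self-contained sketch of averaging runtimes with `timeit` (the stand-in model and input are illustrative only):

```python
import torch
from torch_geometric.profile import timeit

model = torch.nn.Linear(16, 4)  # stand-in model for illustration
x = torch.randn(32, 16)

with timeit(avg_time_divisor=100) as t:
    for _ in range(100):
        with torch.no_grad():
            model(x)
# t.duration now holds the average runtime of a single call
```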
-    Returns a :obj:`StatsSummary` object with the attributes
-    :obj:`time_mean`, :obj:`time_std`,
-    :obj:`max_allocated_cuda`, :obj:`max_reserved_cuda`,
-    :obj:`max_active_cuda`, :obj:`min_nvidia_smi_free_cuda`,
-    :obj:`max_nvidia_smi_used_cuda`.
-
-    Args:
-        stats_list (List[Stats]): A list of :obj:`Stats` objects, as returned
-            by :meth:`~torch_geometric.profile.profileit`.
-    """
-    return StatsSummary(
-        time_mean=float(torch.tensor([s.time for s in stats_list]).mean()),
-        time_std=float(torch.tensor([s.time for s in stats_list]).std()),
-        max_allocated_cuda=max([s.max_allocated_cuda for s in stats_list]),
-        max_reserved_cuda=max([s.max_reserved_cuda for s in stats_list]),
-        max_active_cuda=max([s.max_active_cuda for s in stats_list]),
-        min_nvidia_smi_free_cuda=min(
-            [s.nvidia_smi_free_cuda for s in stats_list]),
-        max_nvidia_smi_used_cuda=max(
-            [s.nvidia_smi_used_cuda for s in stats_list]),
-    )
-
-
-###############################################################################
-
-
-def read_from_memlab(line_profiler: Any) -> List[float]:  # pragma: no cover
-    from pytorch_memlab.line_profiler.line_records import LineRecords
-
-    # See: https://pytorch.org/docs/stable/cuda.html#torch.cuda.memory_stats
-
-    track_stats = [  # Different statistics can be collected as needed.
-        'allocated_bytes.all.peak',
-        'reserved_bytes.all.peak',
-        'active_bytes.all.peak',
-    ]
-
-    records = LineRecords(line_profiler._raw_line_records,
-                          line_profiler._code_infos)
-    stats = records.display(None, track_stats)._line_records
-    return [byte_to_megabyte(x) for x in stats.values.max(axis=0).tolist()]
-
-
-def trace_handler(p):
-    if torch.cuda.is_available():
-        profile_sort = 'self_cuda_time_total'
-    else:
-        profile_sort = 'self_cpu_time_total'
-    output = p.key_averages().table(sort_by=profile_sort)
-    print(output)
-    profile_dir = str(pathlib.Path.cwd()) + '/'
-    timeline_file = profile_dir + 'timeline' + '.json'
-    p.export_chrome_trace(timeline_file)
-
-
-def rename_profile_file(*args):
-    profile_dir = str(pathlib.Path.cwd()) + '/'
-    timeline_file = profile_dir + 'profile'
-    for arg in args:
-        timeline_file += '-' + arg
-    timeline_file += '.json'
-    os.rename('timeline.json', timeline_file)
-
-
-@contextmanager
-def torch_profile():
-    activities = [ProfilerActivity.CPU]
-    if torch.cuda.is_available():
-        activities.append(ProfilerActivity.CUDA)
-
-    with profile(activities=activities, on_trace_ready=trace_handler) as p:
-        yield
-        p.step()
diff --git a/pytorch_geometric-2.3.1/torch_geometric/resolver.py b/pytorch_geometric-2.3.1/torch_geometric/resolver.py
deleted file mode 100644
index 6b642d5..0000000
--- a/pytorch_geometric-2.3.1/torch_geometric/resolver.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import inspect
-from typing import Any, Dict, List, Optional, Union
-
-
-def normalize_string(s: str) -> str:
-    return s.lower().replace('-', '').replace('_', '').replace(' ', '')
-
-
-def resolver(classes: List[Any], class_dict: Dict[str, Any],
-             query: Union[Any, str], base_cls: Optional[Any],
-             base_cls_repr: Optional[str], *args, **kwargs):
-
-    if not isinstance(query, str):
-        return query
-
-    query_repr = normalize_string(query)
-    if base_cls_repr is None:
-        base_cls_repr = base_cls.__name__ if base_cls else ''
-    base_cls_repr = normalize_string(base_cls_repr)
-
-    for key_repr, cls in class_dict.items():
-        if query_repr == key_repr:
-            if inspect.isclass(cls):
-                obj = cls(*args, **kwargs)
-                assert callable(obj)
-                return obj
-            assert callable(cls)
-            return cls
-
-    for cls in classes:
-        cls_repr = normalize_string(cls.__name__)
-        if
query_repr in [cls_repr, cls_repr.replace(base_cls_repr, '')]: - if inspect.isclass(cls): - obj = cls(*args, **kwargs) - assert callable(obj) - return obj - assert callable(cls) - return cls - - choices = set(cls.__name__ for cls in classes) | set(class_dict.keys()) - raise ValueError(f"Could not resolve '{query}' among choices {choices}") diff --git a/pytorch_geometric-2.3.1/torch_geometric/sampler/base.py b/pytorch_geometric-2.3.1/torch_geometric/sampler/base.py deleted file mode 100644 index 9b34cce..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/sampler/base.py +++ /dev/null @@ -1,470 +0,0 @@ -import copy -import math -from abc import ABC -from dataclasses import dataclass -from enum import Enum -from typing import Any, Dict, List, Optional, Union - -import torch -from torch import Tensor - -from torch_geometric.data import Data, FeatureStore, GraphStore, HeteroData -from torch_geometric.typing import EdgeType, EdgeTypeStr, NodeType, OptTensor -from torch_geometric.utils.mixin import CastMixin - - -class DataType(Enum): - r"""The data type a sampler is operating on.""" - homogeneous = 'homogeneous' - heterogeneous = 'heterogeneous' - remote = 'remote' - - @classmethod - def from_data(cls, data: Any): - if isinstance(data, Data): - return cls.homogeneous - elif isinstance(data, HeteroData): - return cls.heterogeneous - elif (isinstance(data, (list, tuple)) and len(data) == 2 - and isinstance(data[0], FeatureStore) - and isinstance(data[1], GraphStore)): - return cls.remote - - raise ValueError(f"Expected a 'Data', 'HeteroData', or a tuple of " - f"'FeatureStore' and 'GraphStore' " - f"(got '{type(data)}')") - - -@dataclass -class NodeSamplerInput(CastMixin): - r"""The sampling input of - :meth:`~torch_geometric.sampler.BaseSampler.sample_from_nodes`. - - Args: - input_id (torch.Tensor, optional): The indices of the data loader input - of the current mini-batch. - node (torch.Tensor): The indices of seed nodes to start sampling from. - time (torch.Tensor, optional): The timestamp for the seed nodes. - (default: :obj:`None`) - input_type (str, optional): The input node type (in case of sampling in - a heterogeneous graph). (default: :obj:`None`) - """ - input_id: OptTensor - node: Tensor - time: OptTensor = None - input_type: Optional[NodeType] = None - - def __getitem__(self, index: Union[Tensor, Any]) -> 'NodeSamplerInput': - if not isinstance(index, Tensor): - index = torch.tensor(index, dtype=torch.long) - - return NodeSamplerInput( - self.input_id[index] if self.input_id is not None else index, - self.node[index], - self.time[index] if self.time is not None else None, - self.input_type, - ) - - -@dataclass -class EdgeSamplerInput(CastMixin): - r"""The sampling input of - :meth:`~torch_geometric.sampler.BaseSampler.sample_from_edges`. - - Args: - input_id (torch.Tensor, optional): The indices of the data loader input - of the current mini-batch. - row (torch.Tensor): The source node indices of seed links to start - sampling from. - col (torch.Tensor): The destination node indices of seed links to start - sampling from. - label (torch.Tensor, optional): The label for the seed links. - (default: :obj:`None`) - time (torch.Tensor, optional): The timestamp for the seed links. - (default: :obj:`None`) - input_type (Tuple[str, str, str], optional): The input edge type (in - case of sampling in a heterogeneous graph). 
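A small sketch of how these input dataclasses behave, using `NodeSamplerInput` as defined above:

```python
import torch

seed = NodeSamplerInput(input_id=None, node=torch.tensor([0, 5, 7]))
sub = seed[torch.tensor([0, 2])]
# sub.node == tensor([0, 7]); since `input_id` was None, it is filled with
# the selected indices so mini-batch bookkeeping stays aligned with the seeds.
```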
(default: :obj:`None`) - """ - input_id: OptTensor - row: Tensor - col: Tensor - label: OptTensor = None - time: OptTensor = None - input_type: Optional[EdgeType] = None - - def __getitem__(self, index: Union[Tensor, Any]) -> 'EdgeSamplerInput': - if not isinstance(index, Tensor): - index = torch.tensor(index, dtype=torch.long) - - return EdgeSamplerInput( - self.input_id[index] if self.input_id is not None else index, - self.row[index], - self.col[index], - self.label[index] if self.label is not None else None, - self.time[index] if self.time is not None else None, - self.input_type, - ) - - -@dataclass -class SamplerOutput(CastMixin): - r"""The sampling output of a :class:`~torch_geometric.sampler.BaseSampler` - on homogeneous graphs. - - Args: - node (torch.Tensor): The sampled nodes in the original graph. - row (torch.Tensor): The source node indices of the sampled subgraph. - Indices must be re-indexed to :obj:`{ 0, ..., num_nodes - 1 }` - corresponding to the nodes in the :obj:`node` tensor. - col (torch.Tensor): The destination node indices of the sampled - subgraph. - Indices must be re-indexed to :obj:`{ 0, ..., num_nodes - 1 }` - corresponding to the nodes in the :obj:`node` tensor. - edge (torch.Tensor, optional): The sampled edges in the original graph. - This tensor is used to obtain edge features from the original - graph. If no edge attributes are present, it may be omitted. - batch (torch.Tensor, optional): The vector to identify the seed node - for each sampled node. Can be present in case of disjoint subgraph - sampling per seed node. (default: :obj:`None`) - num_sampled_nodes (List[int], optional): The number of sampled nodes - per hop. (default: :obj:`None`) - num_sampled_edges (List[int], optional): The number of sampled edges - per hop. (default: :obj:`None`) - metadata: (Any, optional): Additional metadata information. - (default: :obj:`None`) - """ - node: Tensor - row: Tensor - col: Tensor - edge: OptTensor - batch: OptTensor = None - num_sampled_nodes: Optional[List[int]] = None - num_sampled_edges: Optional[List[int]] = None - # TODO(manan): refine this further; it does not currently define a proper - # API for the expected output of a sampler. - metadata: Optional[Any] = None - - -@dataclass -class HeteroSamplerOutput(CastMixin): - r"""The sampling output of a :class:`~torch_geometric.sampler.BaseSampler` - on heterogeneous graphs. - - Args: - node (Dict[str, torch.Tensor]): The sampled nodes in the original graph - for each node type. - row (Dict[Tuple[str, str, str], torch.Tensor]): The source node indices - of the sampled subgraph for each edge type. - Indices must be re-indexed to :obj:`{ 0, ..., num_nodes - 1 }` - corresponding to the nodes in the :obj:`node` tensor of the source - node type. - col (Dict[Tuple[str, str, str], torch.Tensor]): The destination node - indices of the sampled subgraph for each edge type. - Indices must be re-indexed to :obj:`{ 0, ..., num_nodes - 1 }` - corresponding to the nodes in the :obj:`node` tensor of the - destination node type. - edge (Dict[Tuple[str, str, str], torch.Tensor], optional): The sampled - edges in the original graph for each edge type. - This tensor is used to obtain edge features from the original - graph. If no edge attributes are present, it may be omitted. - batch (Dict[str, torch.Tensor], optional): The vector to identify the - seed node for each sampled node for each node type. Can be present - in case of disjoint subgraph sampling per seed node. 
- (default: :obj:`None`) - num_sampled_nodes (Dict[str, List[int]], optional): The number of - sampled nodes for each node type and each layer. - (default: :obj:`None`) - num_sampled_edges (Dict[EdgeType, List[int]], optional): The number of - sampled edges for each edge type and each layer. - (default: :obj:`None`) - metadata: (Any, optional): Additional metadata information. - (default: :obj:`None`) - """ - node: Dict[NodeType, Tensor] - row: Dict[EdgeType, Tensor] - col: Dict[EdgeType, Tensor] - edge: Optional[Dict[EdgeType, Tensor]] - batch: Optional[Dict[NodeType, Tensor]] = None - num_sampled_nodes: Optional[Dict[NodeType, List[int]]] = None - num_sampled_edges: Optional[Dict[EdgeType, List[int]]] = None - # TODO(manan): refine this further; it does not currently define a proper - # API for the expected output of a sampler. - metadata: Optional[Any] = None - - -@dataclass(frozen=True) -class NumNeighbors: - r"""The number of neighbors to sample in a homogeneous or heterogeneous - graph. In heterogeneous graphs, may also take in a dictionary denoting - the amount of neighbors to sample for individual edge types. - - Args: - values (List[int] or Dict[Tuple[str, str, str], List[int]]): The - number of neighbors to sample. - If an entry is set to :obj:`-1`, all neighbors will be included. - In heterogeneous graphs, may also take in a dictionary denoting - the amount of neighbors to sample for individual edge types. - default (List[int], optional): The default number of neighbors for edge - types not specified in :obj:`values`. (default: :obj:`None`) - """ - values: Union[List[int], Dict[EdgeTypeStr, List[int]]] - default: Optional[List[int]] = None - - def __init__( - self, - values: Union[List[int], Dict[EdgeType, List[int]]], - default: Optional[List[int]] = None, - ): - if isinstance(values, (tuple, list)) and default is not None: - raise ValueError(f"'default' must be set to 'None' in case a " - f"single list is given as the number of " - f"neighbors (got '{type(default)})'") - - if isinstance(values, dict): - values = {EdgeTypeStr(key): value for key, value in values.items()} - - # Write to `__dict__` since dataclass is annotated with `frozen=True`: - self.__dict__['values'] = values - self.__dict__['default'] = default - - def _get_values( - self, - edge_types: Optional[List[EdgeType]] = None, - mapped: bool = False, - ) -> Union[List[int], Dict[Union[EdgeType, EdgeTypeStr], List[int]]]: - - if edge_types is not None: - if isinstance(self.values, (tuple, list)): - default = self.values - elif isinstance(self.values, dict): - default = self.default - else: - assert False - - out = {} - for edge_type in edge_types: - edge_type_str = EdgeTypeStr(edge_type) - if edge_type_str in self.values: - out[edge_type_str if mapped else edge_type] = ( - self.values[edge_type_str]) - else: - if default is None: - raise ValueError(f"Missing number of neighbors for " - f"edge type '{edge_type}'") - out[edge_type_str if mapped else edge_type] = default - - elif isinstance(self.values, dict) and not mapped: - out = {key.to_tuple(): value for key, value in self.values.items()} - - else: - out = copy.copy(self.values) - - if isinstance(out, dict): - num_hops = set(len(v) for v in out.values()) - if len(num_hops) > 1: - raise ValueError(f"Number of hops must be the same across all " - f"edge types (got {len(num_hops)} different " - f"number of hops)") - - return out - - def get_values( - self, - edge_types: Optional[List[EdgeType]] = None, - ) -> Union[List[int], Dict[EdgeType, List[int]]]: - r"""Returns 
the number of neighbors.
-
-        Args:
-            edge_types (List[Tuple[str, str, str]], optional): The edge types
-                to generate the number of neighbors for.
-                (default: :obj:`None`)
-        """
-        if '_values' in self.__dict__:
-            return self.__dict__['_values']
-
-        values = self._get_values(edge_types, mapped=False)
-
-        self.__dict__['_values'] = values
-        return values
-
-    def get_mapped_values(
-        self,
-        edge_types: Optional[List[EdgeType]] = None,
-    ) -> Union[List[int], Dict[str, List[int]]]:
-        r"""Returns the number of neighbors.
-        For heterogeneous graphs, a dictionary is returned in which edge type
-        tuples are converted to strings.
-
-        Args:
-            edge_types (List[Tuple[str, str, str]], optional): The edge types
-                to generate the number of neighbors for.
-                (default: :obj:`None`)
-        """
-        if '_mapped_values' in self.__dict__:
-            return self.__dict__['_mapped_values']
-
-        values = self._get_values(edge_types, mapped=True)
-
-        self.__dict__['_mapped_values'] = values
-        return values
-
-    @property
-    def num_hops(self) -> int:
-        r"""Returns the number of hops."""
-        if '_num_hops' in self.__dict__:
-            return self.__dict__['_num_hops']
-
-        if isinstance(self.values, (tuple, list)):
-            num_hops = max(len(self.values), len(self.default or []))
-        else:  # isinstance(self.values, dict):
-            num_hops = max([0] + [len(v) for v in self.values.values()])
-            num_hops = max(num_hops, len(self.default or []))
-
-        self.__dict__['_num_hops'] = num_hops
-        return num_hops
-
-    def __len__(self) -> int:
-        r"""Returns the number of hops."""
-        return self.num_hops
-
-
-class NegativeSamplingMode(Enum):
-    # 'binary': Randomly sample negative edges in the graph.
-    binary = 'binary'
-    # 'triplet': Randomly sample negative destination nodes for each positive
-    # source node.
-    triplet = 'triplet'
-
-
-@dataclass
-class NegativeSampling(CastMixin):
-    r"""The negative sampling configuration of a
-    :class:`~torch_geometric.sampler.BaseSampler` when calling
-    :meth:`~torch_geometric.sampler.BaseSampler.sample_from_edges`.
-
-    Args:
-        mode (str): The negative sampling mode
-            (:obj:`"binary"` or :obj:`"triplet"`).
-            If set to :obj:`"binary"`, will randomly sample negative links
-            from the graph.
-            If set to :obj:`"triplet"`, will randomly sample negative
-            destination nodes for each positive source node.
-        amount (int or float, optional): The ratio of sampled negative edges
-            to the number of positive edges. (default: :obj:`1`)
-        weight (torch.Tensor, optional): A node-level vector determining the
-            sampling of nodes. Does not necessarily need to sum up to one.
-            If not given, negative nodes will be sampled uniformly.
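A sketch of the two configuration objects defined above (the edge type `('paper', 'cites', 'paper')` is an illustrative placeholder):

```python
num_neighbors = NumNeighbors({('paper', 'cites', 'paper'): [10, 5]},
                             default=[5, 5])
assert num_neighbors.num_hops == 2  # hop count must agree across edge types

neg = NegativeSampling(mode='triplet', amount=2)
samples = neg.sample(num_samples=4, num_nodes=100)  # 4 random node ids
```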
- (default: :obj:`None`) - """ - mode: NegativeSamplingMode - amount: Union[int, float] = 1 - weight: Optional[Tensor] = None - - def __init__( - self, - mode: Union[NegativeSamplingMode, str], - amount: Union[int, float] = 1, - weight: Optional[Tensor] = None, - ): - self.mode = NegativeSamplingMode(mode) - self.amount = amount - self.weight = weight - - if self.amount <= 0: - raise ValueError(f"The attribute 'amount' needs to be positive " - f"for '{self.__class__.__name__}' " - f"(got {self.amount})") - - if self.is_triplet(): - if self.amount != math.ceil(self.amount): - raise ValueError(f"The attribute 'amount' needs to be an " - f"integer for '{self.__class__.__name__}' " - f"with 'triplet' negative sampling " - f"(got {self.amount}).") - self.amount = math.ceil(self.amount) - - def is_binary(self) -> bool: - return self.mode == NegativeSamplingMode.binary - - def is_triplet(self) -> bool: - return self.mode == NegativeSamplingMode.triplet - - def sample(self, num_samples: int, - num_nodes: Optional[int] = None) -> Tensor: - r"""Generates :obj:`num_samples` negative samples.""" - if self.weight is None: - if num_nodes is None: - raise ValueError( - f"Cannot sample negatives in '{self.__class__.__name__}' " - f"without passing the 'num_nodes' argument") - return torch.randint(num_nodes, (num_samples, )) - - if num_nodes is not None and self.weight.numel() != num_nodes: - raise ValueError( - f"The 'weight' attribute in '{self.__class__.__name__}' " - f"needs to match the number of nodes {num_nodes} " - f"(got {self.weight.numel()})") - return torch.multinomial(self.weight, num_samples, replacement=True) - - -class BaseSampler(ABC): - r"""An abstract base class that initializes a graph sampler and provides - :meth:`sample_from_nodes` and :meth:`sample_from_edges` routines. - - .. note :: - - Any data stored in the sampler will be *replicated* across data loading - workers that use the sampler since each data loading worker holds its - own instance of a sampler. - As such, it is recommended to limit the amount of information stored in - the sampler. - """ - def sample_from_nodes( - self, - index: NodeSamplerInput, - **kwargs, - ) -> Union[HeteroSamplerOutput, SamplerOutput]: - r"""Performs sampling from the nodes specified in :obj:`index`, - returning a sampled subgraph in the specified output format. - - The :obj:`index` is a tuple holding the following information: - - 1. The example indices of the seed nodes - 2. The node indices to start sampling from - 3. The timestamps of the given seed nodes (optional) - - Args: - index (NodeSamplerInput): The node sampler input object. - """ - raise NotImplementedError - - def sample_from_edges( - self, - index: EdgeSamplerInput, - neg_sampling: Optional[NegativeSampling] = None, - ) -> Union[HeteroSamplerOutput, SamplerOutput]: - r"""Performs sampling from the edges specified in :obj:`index`, - returning a sampled subgraph in the specified output format. - - The :obj:`index` is a tuple holding the following information: - - 1. The example indices of the seed links - 2. The source node indices to start sampling from - 3. The destination node indices to start sampling from - 4. The labels of the seed links (optional) - 5. The timestamps of the given seed nodes (optional) - - Args: - index (EdgeSamplerInput): The edge sampler input object. - neg_sampling (NegativeSampling, optional): The negative sampling - configuration. 
-                (default: :obj:`None`)
-        """
-        raise NotImplementedError
-
-    @property
-    def edge_permutation(self) -> Union[OptTensor, Dict[EdgeType, OptTensor]]:
-        r"""If the sampler performs any modification of edge ordering in the
-        original graph, this function is expected to return the permutation
-        tensor that defines the permutation from the edges in the original
-        graph and the edges used in the sampler. If no such permutation was
-        applied, :obj:`None` is returned. For heterogeneous graphs, the
-        expected return type is a permutation tensor for each edge type."""
-        return None
diff --git a/pytorch_geometric-2.3.1/torch_geometric/sampler/neighbor_sampler.py b/pytorch_geometric-2.3.1/torch_geometric/sampler/neighbor_sampler.py
deleted file mode 100644
index 7c94eb6..0000000
--- a/pytorch_geometric-2.3.1/torch_geometric/sampler/neighbor_sampler.py
+++ /dev/null
@@ -1,623 +0,0 @@
-import copy
-import math
-import sys
-import warnings
-from typing import Callable, Dict, List, Optional, Tuple, Union
-
-import torch
-from torch import Tensor
-
-import torch_geometric.typing
-from torch_geometric.data import (
-    Data,
-    FeatureStore,
-    GraphStore,
-    HeteroData,
-    remote_backend_utils,
-)
-from torch_geometric.data.graph_store import EdgeLayout
-from torch_geometric.sampler import (
-    BaseSampler,
-    EdgeSamplerInput,
-    HeteroSamplerOutput,
-    NegativeSampling,
-    NodeSamplerInput,
-    SamplerOutput,
-)
-from torch_geometric.sampler.base import DataType, NumNeighbors
-from torch_geometric.sampler.utils import remap_keys, to_csc, to_hetero_csc
-from torch_geometric.typing import EdgeType, NodeType, OptTensor
-
-NumNeighborsType = Union[NumNeighbors, List[int], Dict[EdgeType, List[int]]]
-
-
-class NeighborSampler(BaseSampler):
-    r"""An implementation of an in-memory (heterogeneous) neighbor sampler
-    used by :class:`~torch_geometric.loader.NeighborLoader`."""
-    def __init__(
-        self,
-        data: Union[Data, HeteroData, Tuple[FeatureStore, GraphStore]],
-        num_neighbors: NumNeighborsType,
-        replace: bool = False,
-        directed: bool = True,
-        disjoint: bool = False,
-        temporal_strategy: str = 'uniform',
-        time_attr: Optional[str] = None,
-        is_sorted: bool = False,
-        share_memory: bool = False,
-    ):
-        if not torch_geometric.typing.WITH_PYG_LIB and sys.platform == 'linux':
-            warnings.warn(f"Using '{self.__class__.__name__}' without a "
-                          "'pyg-lib' installation is deprecated and will be "
-                          "removed soon. Please install 'pyg-lib' for "
-                          "accelerated neighborhood sampling")
-
-        self.data_type = DataType.from_data(data)
-
-        if self.data_type == DataType.homogeneous:
-            self.num_nodes = data.num_nodes
-            self.node_time = data[time_attr] if time_attr else None
-
-            # Convert the graph data into CSC format for sampling:
-            self.colptr, self.row, self.perm = to_csc(
-                data, device='cpu', share_memory=share_memory,
-                is_sorted=is_sorted, src_node_time=self.node_time)
-
-        elif self.data_type == DataType.heterogeneous:
-            self.node_types, self.edge_types = data.metadata()
-
-            self.num_nodes = {k: data[k].num_nodes for k in self.node_types}
-            self.node_time = data.collect(time_attr) if time_attr else None
-
-            # Conversion to/from C++ string type: Since C++ cannot take
-            # dictionaries with tuples as keys as input, edge type triplets
-            # need to be converted into single strings.
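-            # For example (illustrative mapping, not from the original file):
-            #   ('paper', 'cites', 'paper')  ->  'paper__cites__paper'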
- self.to_rel_type = {k: '__'.join(k) for k in self.edge_types} - self.to_edge_type = {v: k for k, v in self.to_rel_type.items()} - - # Convert the graph data into CSC format for sampling: - colptr_dict, row_dict, self.perm = to_hetero_csc( - data, device='cpu', share_memory=share_memory, - is_sorted=is_sorted, node_time_dict=self.node_time) - self.row_dict = remap_keys(row_dict, self.to_rel_type) - self.colptr_dict = remap_keys(colptr_dict, self.to_rel_type) - - else: # self.data_type == DataType.remote - feature_store, graph_store = data - - # Obtain graph metadata: - node_attrs = feature_store.get_all_tensor_attrs() - self.node_types = list(set(attr.group_name for attr in node_attrs)) - - edge_attrs = graph_store.get_all_edge_attrs() - self.edge_types = list(set(attr.edge_type for attr in edge_attrs)) - - self.num_nodes = { - node_type: remote_backend_utils.size(*data, node_type) - for node_type in self.node_types - } - - self.node_time: Optional[Dict[str, Tensor]] = None - if time_attr is not None: - # If the `time_attr` is present, we expect that `GraphStore` - # holds all edges sorted by destination, and within local - # neighborhoods, node indices should be sorted by time. - # TODO (matthias, manan) Find an alternative way to ensure. - for edge_attr in edge_attrs: - if edge_attr.layout == EdgeLayout.CSR: - raise ValueError( - "Temporal sampling requires that edges are stored " - "in either COO or CSC layout") - if not edge_attr.is_sorted: - raise ValueError( - "Temporal sampling requires that edges are " - "sorted by destination, and by source time " - "within local neighborhoods") - - # We obtain all features with `node_attr.name=time_attr`: - time_attrs = [ - copy.copy(attr) for attr in node_attrs - if attr.attr_name == time_attr - ] - for attr in time_attrs: # Reset the index to obtain full data. 
- attr.index = None - time_tensors = feature_store.multi_get_tensor(time_attrs) - self.node_time = { - time_attr.group_name: time_tensor - for time_attr, time_tensor in zip(time_attrs, time_tensors) - } - - # Conversion to/from C++ string type (see above): - self.to_rel_type = {k: '__'.join(k) for k in self.edge_types} - self.to_edge_type = {v: k for k, v in self.to_rel_type.items()} - - # Convert the graph data into CSC format for sampling: - row_dict, colptr_dict, self.perm = graph_store.csc() - self.row_dict = remap_keys(row_dict, self.to_rel_type) - self.colptr_dict = remap_keys(colptr_dict, self.to_rel_type) - - self.num_neighbors = num_neighbors - self.replace = replace - self.directed = directed - self.disjoint = disjoint - self.temporal_strategy = temporal_strategy - - @property - def num_neighbors(self) -> NumNeighbors: - return self._num_neighbors - - @num_neighbors.setter - def num_neighbors(self, num_neighbors: NumNeighborsType): - if isinstance(num_neighbors, NumNeighbors): - self._num_neighbors = num_neighbors - else: - self._num_neighbors = NumNeighbors(num_neighbors) - - @property - def is_temporal(self) -> bool: - return self.node_time is not None - - @property - def disjoint(self) -> bool: - return self._disjoint or self.is_temporal - - @disjoint.setter - def disjoint(self, disjoint: bool): - self._disjoint = disjoint - - # Node-based sampling ##################################################### - - def sample_from_nodes( - self, - inputs: NodeSamplerInput, - ) -> Union[SamplerOutput, HeteroSamplerOutput]: - return node_sample(inputs, self._sample) - - # Edge-based sampling ##################################################### - - def sample_from_edges( - self, inputs: EdgeSamplerInput, - neg_sampling: Optional[NegativeSampling] = None - ) -> Union[SamplerOutput, HeteroSamplerOutput]: - return edge_sample(inputs, self._sample, self.num_nodes, self.disjoint, - self.node_time, neg_sampling) - - # Other Utilities ######################################################### - - @property - def edge_permutation(self) -> Union[OptTensor, Dict[EdgeType, OptTensor]]: - return self.perm - - # Helper functions ######################################################## - - def _sample( - self, - seed: Union[Tensor, Dict[NodeType, Tensor]], - seed_time: Optional[Union[Tensor, Dict[NodeType, Tensor]]] = None, - **kwargs, - ) -> Union[SamplerOutput, HeteroSamplerOutput]: - r"""Implements neighbor sampling by calling either :obj:`pyg-lib` (if - installed) or :obj:`torch-sparse` sampling routines.""" - if isinstance(seed, dict): # Heterogeneous sampling: - if torch_geometric.typing.WITH_PYG_LIB: - # TODO (matthias) `return_edge_id` if edge features present - # TODO (matthias) Ideally, `seed` inherits dtype from `colptr` - colptrs = list(self.colptr_dict.values()) - dtype = colptrs[0].dtype if len(colptrs) > 0 else torch.int64 - seed = {k: v.to(dtype) for k, v in seed.items()} - - out = torch.ops.pyg.hetero_neighbor_sample( - self.node_types, - self.edge_types, - self.colptr_dict, - self.row_dict, - seed, - self.num_neighbors.get_mapped_values(self.edge_types), - self.node_time, - seed_time, - True, # csc - self.replace, - self.directed, - self.disjoint, - self.temporal_strategy, - True, # return_edge_id - ) - row, col, node, edge, batch = out[:4] + (None, ) - - # `pyg-lib>0.1.0` returns sampled number of nodes/edges: - num_sampled_nodes = num_sampled_edges = None - if len(out) == 6: - num_sampled_nodes, num_sampled_edges = out[4:] - - if self.disjoint: - node = {k: v.t().contiguous() for k, 
v in node.items()} - batch = {k: v[0] for k, v in node.items()} - node = {k: v[1] for k, v in node.items()} - - elif torch_geometric.typing.WITH_TORCH_SPARSE: - if self.disjoint: - raise ValueError("'disjoint' sampling not supported for " - "neighbor sampling via 'torch-sparse'. " - "Please install 'pyg-lib' for improved " - "and optimized sampling routines.") - - out = torch.ops.torch_sparse.hetero_neighbor_sample( - self.node_types, - self.edge_types, - self.colptr_dict, - self.row_dict, - seed, # seed_dict - self.num_neighbors.get_mapped_values(self.edge_types), - self.num_neighbors.num_hops, - self.replace, - self.directed, - ) - node, row, col, edge, batch = out + (None, ) - num_sampled_nodes = num_sampled_edges = None - - else: - raise ImportError(f"'{self.__class__.__name__}' requires " - f"either 'pyg-lib' or 'torch-sparse'") - - if num_sampled_edges is not None: - num_sampled_edges = remap_keys( - num_sampled_edges, - self.to_edge_type, - ) - - return HeteroSamplerOutput( - node=node, - row=remap_keys(row, self.to_edge_type), - col=remap_keys(col, self.to_edge_type), - edge=remap_keys(edge, self.to_edge_type), - batch=batch, - num_sampled_nodes=num_sampled_nodes, - num_sampled_edges=num_sampled_edges, - ) - - else: # Homogeneous sampling: - if torch_geometric.typing.WITH_PYG_LIB: - # TODO (matthias) `return_edge_id` if edge features present - # TODO (matthias) Ideally, `seed` inherits dtype from `colptr` - out = torch.ops.pyg.neighbor_sample( - self.colptr, - self.row, - seed.to(self.colptr.dtype), # seed - self.num_neighbors.get_mapped_values(), - self.node_time, - seed_time, - True, # csc - self.replace, - self.directed, - self.disjoint, - self.temporal_strategy, - True, # return_edge_id - ) - row, col, node, edge, batch = out[:4] + (None, ) - - # `pyg-lib>0.1.0` returns sampled number of nodes/edges: - num_sampled_nodes = num_sampled_edges = None - if len(out) == 6: - num_sampled_nodes, num_sampled_edges = out[4:] - - if self.disjoint: - batch, node = node.t().contiguous() - - elif torch_geometric.typing.WITH_TORCH_SPARSE: - if self.disjoint: - raise ValueError("'disjoint' sampling not supported for " - "neighbor sampling via 'torch-sparse'. " - "Please install 'pyg-lib' for improved " - "and optimized sampling routines.") - - out = torch.ops.torch_sparse.neighbor_sample( - self.colptr, - self.row, - seed, # seed - self.num_neighbors.get_mapped_values(), - self.replace, - self.directed, - ) - node, row, col, edge, batch = out + (None, ) - num_sampled_nodes = num_sampled_edges = None - - else: - raise ImportError(f"'{self.__class__.__name__}' requires " - f"either 'pyg-lib' or 'torch-sparse'") - - return SamplerOutput( - node=node, - row=row, - col=col, - edge=edge, - batch=batch, - num_sampled_nodes=num_sampled_nodes, - num_sampled_edges=num_sampled_edges, - ) - - -# Sampling Utilities ########################################################## - - -def node_sample( - inputs: NodeSamplerInput, - sample_fn: Callable, -) -> Union[SamplerOutput, HeteroSamplerOutput]: - r"""Performs sampling from a :class:`NodeSamplerInput`, leveraging a - sampling function that accepts a seed and (optionally) a seed time as - input. 
Returns the output of this sampling procedure.""" - if inputs.input_type is not None: # Heterogeneous sampling: - seed = {inputs.input_type: inputs.node} - seed_time = None - if inputs.time is not None: - seed_time = {inputs.input_type: inputs.time} - else: # Homogeneous sampling: - seed = inputs.node - seed_time = inputs.time - - out = sample_fn(seed, seed_time) - out.metadata = (inputs.input_id, inputs.time) - - return out - - -def edge_sample( - inputs: EdgeSamplerInput, - sample_fn: Callable, - num_nodes: Union[int, Dict[NodeType, int]], - disjoint: bool, - node_time: Optional[Union[Tensor, Dict[str, Tensor]]] = None, - neg_sampling: Optional[NegativeSampling] = None, -) -> Union[SamplerOutput, HeteroSamplerOutput]: - r"""Performs sampling from an edge sampler input, leveraging a sampling - function of the same signature as `node_sample`.""" - input_id = inputs.input_id - src = inputs.row - dst = inputs.col - edge_label = inputs.label - edge_label_time = inputs.time - input_type = inputs.input_type - - src_time = dst_time = edge_label_time - assert edge_label_time is None or disjoint - - assert isinstance(num_nodes, (dict, int)) - if not isinstance(num_nodes, dict): - num_src_nodes = num_dst_nodes = num_nodes - else: - num_src_nodes = num_nodes[input_type[0]] - num_dst_nodes = num_nodes[input_type[-1]] - - num_pos = src.numel() - num_neg = 0 - - # Negative Sampling ####################################################### - - if neg_sampling is not None: - # When we are doing negative sampling, we append negative information - # of nodes/edges to `src`, `dst`, `src_time`, `dst_time`. - # Later on, we can easily reconstruct what belongs to positive and - # negative examples by slicing via `num_pos`. - num_neg = math.ceil(num_pos * neg_sampling.amount) - - if neg_sampling.is_binary(): - # In the "binary" case, we randomly sample negative pairs of nodes. - if isinstance(node_time, dict): - src_node_time = node_time.get(input_type[0]) - else: - src_node_time = node_time - - src_neg = neg_sample(src, neg_sampling, num_src_nodes, src_time, - src_node_time) - src = torch.cat([src, src_neg], dim=0) - - if isinstance(node_time, dict): - dst_node_time = node_time.get(input_type[-1]) - else: - dst_node_time = node_time - - dst_neg = neg_sample(dst, neg_sampling, num_dst_nodes, dst_time, - dst_node_time) - dst = torch.cat([dst, dst_neg], dim=0) - - if edge_label is None: - edge_label = torch.ones(num_pos) - size = (num_neg, ) + edge_label.size()[1:] - edge_neg_label = edge_label.new_zeros(size) - edge_label = torch.cat([edge_label, edge_neg_label]) - - if edge_label_time is not None: - src_time = dst_time = edge_label_time.repeat( - 1 + math.ceil(neg_sampling.amount))[:num_pos + num_neg] - - elif neg_sampling.is_triplet(): - # In the "triplet" case, we randomly sample negative destinations. 
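A standalone sketch of the "binary" bookkeeping described above, with made-up sizes and uniform sampling standing in for the `neg_sample` helper; negatives are appended after the positives so both can be recovered later by slicing at `num_pos`:

```python
import math
import torch

# A made-up batch: 4 positive destinations, negatives appended behind them.
num_pos, num_dst_nodes = 4, 100
amount = 1.0  # negatives per positive, as in `neg_sampling.amount`

dst = torch.randint(num_dst_nodes, (num_pos, ))      # positive destinations
num_neg = math.ceil(num_pos * amount)
dst_neg = torch.randint(num_dst_nodes, (num_neg, ))  # stand-in for `neg_sample`
dst = torch.cat([dst, dst_neg], dim=0)

edge_label = torch.ones(num_pos)                                      # positives -> 1
edge_label = torch.cat([edge_label, edge_label.new_zeros(num_neg)])   # negatives -> 0

# Positives and negatives can be reconstructed later by slicing at `num_pos`:
assert bool((edge_label[:num_pos] == 1).all())
assert bool((edge_label[num_pos:] == 0).all())
```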
- if isinstance(node_time, dict): - dst_node_time = node_time.get(input_type[-1]) - else: - dst_node_time = node_time - - dst_neg = neg_sample(dst, neg_sampling, num_dst_nodes, dst_time, - dst_node_time) - dst = torch.cat([dst, dst_neg], dim=0) - - assert edge_label is None - - if edge_label_time is not None: - dst_time = edge_label_time.repeat(1 + neg_sampling.amount) - - # Heterogeneous Neighborhood Sampling #################################### - - if input_type is not None: - seed_time_dict = None - if input_type[0] != input_type[-1]: # Two distinct node types: - - if not disjoint: - src, inverse_src = src.unique(return_inverse=True) - dst, inverse_dst = dst.unique(return_inverse=True) - - seed_dict = {input_type[0]: src, input_type[-1]: dst} - - if edge_label_time is not None: # Always disjoint. - seed_time_dict = { - input_type[0]: src_time, - input_type[-1]: dst_time, - } - - else: # Only a single node type: Merge both source and destination. - - seed = torch.cat([src, dst], dim=0) - - if not disjoint: - seed, inverse_seed = seed.unique(return_inverse=True) - - seed_dict = {input_type[0]: seed} - - if edge_label_time is not None: # Always disjoint. - seed_time_dict = { - input_type[0]: torch.cat([src_time, dst_time], dim=0), - } - - out = sample_fn(seed_dict, seed_time_dict) - - # Enhance `out` by label information ################################## - if disjoint: - for key, batch in out.batch.items(): - out.batch[key] = batch % num_pos - - if neg_sampling is None or neg_sampling.is_binary(): - if disjoint: - if input_type[0] != input_type[-1]: - edge_label_index = torch.arange(num_pos + num_neg) - edge_label_index = edge_label_index.repeat(2).view(2, -1) - else: - edge_label_index = torch.arange(2 * (num_pos + num_neg)) - edge_label_index = edge_label_index.view(2, -1) - else: - if input_type[0] != input_type[-1]: - edge_label_index = torch.stack([ - inverse_src, - inverse_dst, - ], dim=0) - else: - edge_label_index = inverse_seed.view(2, -1) - - out.metadata = (input_id, edge_label_index, edge_label, src_time) - - elif neg_sampling.is_triplet(): - if disjoint: - src_index = torch.arange(num_pos) - if input_type[0] != input_type[-1]: - dst_pos_index = torch.arange(num_pos) - # `dst_neg_index` needs to be offset such that indices with - # offset `num_pos` belong to the same triplet: - dst_neg_index = torch.arange( - num_pos, seed_dict[input_type[-1]].numel()) - dst_neg_index = dst_neg_index.view(-1, num_pos).t() - else: - dst_pos_index = torch.arange(num_pos, 2 * num_pos) - dst_neg_index = torch.arange( - 2 * num_pos, seed_dict[input_type[-1]].numel()) - dst_neg_index = dst_neg_index.view(-1, num_pos).t() - else: - if input_type[0] != input_type[-1]: - src_index = inverse_src - dst_pos_index = inverse_dst[:num_pos] - dst_neg_index = inverse_dst[num_pos:] - else: - src_index = inverse_seed[:num_pos] - dst_pos_index = inverse_seed[num_pos:2 * num_pos] - dst_neg_index = inverse_seed[2 * num_pos:] - - dst_neg_index = dst_neg_index.view(num_pos, -1).squeeze(-1) - - out.metadata = (input_id, src_index, dst_pos_index, dst_neg_index, - src_time) - - # Homogeneous Neighborhood Sampling ###################################### - - else: - - seed = torch.cat([src, dst], dim=0) - seed_time = None - - if not disjoint: - seed, inverse_seed = seed.unique(return_inverse=True) - - if edge_label_time is not None: # Always disjoint.
- seed_time = torch.cat([src_time, dst_time]) - - out = sample_fn(seed, seed_time) - - # Enhance `out` by label information ################################## - if neg_sampling is None or neg_sampling.is_binary(): - if disjoint: - out.batch = out.batch % num_pos - edge_label_index = torch.arange(2 * seed.numel()).view(2, -1) - else: - edge_label_index = inverse_seed.view(2, -1) - - out.metadata = (input_id, edge_label_index, edge_label, seed_time) - - elif neg_sampling.is_triplet(): - if disjoint: - out.batch = out.batch % num_pos - src_index = torch.arange(num_pos) - dst_pos_index = torch.arange(num_pos, 2 * num_pos) - # `dst_neg_index` needs to be offset such that indices with - # offset `num_pos` belong to the same triplet: - dst_neg_index = torch.arange(2 * num_pos, seed.numel()) - dst_neg_index = dst_neg_index.view(-1, num_pos).t() - else: - src_index = inverse_seed[:num_pos] - dst_pos_index = inverse_seed[num_pos:2 * num_pos] - dst_neg_index = inverse_seed[2 * num_pos:] - dst_neg_index = dst_neg_index.view(num_pos, -1).squeeze(-1) - - out.metadata = (input_id, src_index, dst_pos_index, dst_neg_index, - src_time) - - return out - - -def neg_sample(seed: Tensor, neg_sampling: NegativeSampling, num_nodes: int, - seed_time: Optional[Tensor], - node_time: Optional[Tensor]) -> Tensor: - num_neg = math.ceil(seed.numel() * neg_sampling.amount) - - # TODO: Do not sample false negatives. - if node_time is None: - return neg_sampling.sample(num_neg, num_nodes) - - # If we are in a temporal-sampling scenario, we need to respect the - # timestamps of the nodes we may use as negative examples. - # That is, we can only sample nodes for which `node_time <= seed_time`. - # For now, we use a greedy algorithm which randomly samples negative - # nodes and discards any that do not respect the temporal constraint. - # We iteratively repeat this process until we have sampled a valid node for - # each seed. - # TODO See if this greedy algorithm here can be improved. - assert seed_time is not None - num_samples = math.ceil(neg_sampling.amount) - seed_time = seed_time.view(1, -1).expand(num_samples, -1) - - out = neg_sampling.sample(num_samples * seed.numel(), num_nodes) - out = out.view(num_samples, seed.numel()) - mask = node_time[out] > seed_time # holds all invalid samples. - neg_sampling_complete = False - for i in range(5): # pragma: no cover - num_invalid = int(mask.sum()) - if num_invalid == 0: - neg_sampling_complete = True - break - - # Greedily search for alternative negatives. - out[mask] = tmp = neg_sampling.sample(num_invalid, num_nodes) - mask[mask.clone()] = node_time[tmp] > seed_time[mask] - - if not neg_sampling_complete: # pragma: no cover - # Not many options left. In that case, we set the remaining negatives - # to the node with the minimum timestamp.
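A self-contained sketch of this greedy rejection loop, with made-up timestamps and uniform sampling standing in for `neg_sampling.sample`; the final fallback mirrors the `argmin` assignment that follows:

```python
import math
import torch

num_nodes, num_seeds = 50, 8
amount = 2.0                                       # negatives per seed
node_time = torch.randint(0, 100, (num_nodes, ))   # made-up node timestamps
seed_time = torch.randint(50, 100, (num_seeds, ))  # made-up seed timestamps

num_samples = math.ceil(amount)
seed_time = seed_time.view(1, -1).expand(num_samples, -1)

out = torch.randint(num_nodes, (num_samples, num_seeds))
mask = node_time[out] > seed_time  # invalid: candidate is newer than its seed
for _ in range(5):                 # bounded number of greedy re-draws
    num_invalid = int(mask.sum())
    if num_invalid == 0:
        break
    out[mask] = tmp = torch.randint(num_nodes, (num_invalid, ))
    mask[mask.clone()] = node_time[tmp] > seed_time[mask]

out[mask] = node_time.argmin()  # fallback: the node with the smallest timestamp
```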
- out[mask] = node_time.argmin() - - return out.view(-1)[:num_neg] diff --git a/pytorch_geometric-2.3.1/torch_geometric/sampler/utils.py b/pytorch_geometric-2.3.1/torch_geometric/sampler/utils.py deleted file mode 100644 index 15ee49a..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/sampler/utils.py +++ /dev/null @@ -1,127 +0,0 @@ -from typing import Any, Dict, List, Optional, Tuple, TypeVar, Union - -import numpy as np -import torch -from torch import Tensor - -from torch_geometric.data import Data, HeteroData -from torch_geometric.data.storage import EdgeStorage -from torch_geometric.typing import NodeType, OptTensor -from torch_geometric.utils import index_sort -from torch_geometric.utils.sparse import index2ptr - -# Edge Layout Conversion ###################################################### - - -def sort_csc( - row: Tensor, - col: Tensor, - src_node_time: OptTensor = None, -) -> Tuple[Tensor, Tensor, Tensor]: - if src_node_time is None: - col, perm = index_sort(col) - return row[perm], col, perm - else: - # We use `np.lexsort` to sort based on multiple keys. - # TODO There does not seem to exist a PyTorch equivalent yet :( - perm = np.lexsort([ - src_node_time[row].detach().cpu().numpy(), - col.detach().cpu().numpy() - ]) - perm = torch.from_numpy(perm).to(col.device) - - return row[perm], col[perm], perm - - -# TODO(manan) deprecate when FeatureStore / GraphStore unification is complete -def to_csc( - data: Union[Data, EdgeStorage], - device: Optional[torch.device] = None, - share_memory: bool = False, - is_sorted: bool = False, - src_node_time: Optional[Tensor] = None, -) -> Tuple[Tensor, Tensor, OptTensor]: - # Convert the graph data into a suitable format for sampling (CSC format). - # Returns the `colptr` and `row` indices of the graph, as well as a - # `perm` vector that denotes the permutation of edges. - # Since no permutation of edges is applied when using `SparseTensor`, - # `perm` can be of type `None`. - perm: Optional[Tensor] = None - - if hasattr(data, 'adj'): - if src_node_time is not None: - raise NotImplementedError("Temporal sampling via 'SparseTensor' " - "format not yet supported") - colptr, row, _ = data.adj.csc() - - elif hasattr(data, 'adj_t'): - if src_node_time is not None: - # TODO (matthias) This only works when instantiating a - # `SparseTensor` with `is_sorted=True`. Otherwise, the - # `SparseTensor` will by default re-sort the neighbors according to - # column index.
- # As such, we probably want to consider re-adding error: - # raise NotImplementedError("Temporal sampling via 'SparseTensor' " - # "format not yet supported") - pass - colptr, row, _ = data.adj_t.csr() - - elif data.edge_index is not None: - row, col = data.edge_index - if not is_sorted: - row, col, perm = sort_csc(row, col, src_node_time) - - colptr = index2ptr(col, data.size(1)) - - else: - row = torch.empty(0, dtype=torch.long, device=device) - colptr = torch.zeros(data.num_nodes + 1, dtype=torch.long, - device=device) - - colptr = colptr.to(device) - row = row.to(device) - perm = perm.to(device) if perm is not None else None - - if not colptr.is_cuda and share_memory: - colptr.share_memory_() - row.share_memory_() - if perm is not None: - perm.share_memory_() - - return colptr, row, perm - - -def to_hetero_csc( - data: HeteroData, - device: Optional[torch.device] = None, - share_memory: bool = False, - is_sorted: bool = False, - node_time_dict: Optional[Dict[NodeType, Tensor]] = None, -) -> Tuple[Dict[str, Tensor], Dict[str, Tensor], Dict[str, OptTensor]]: - # Convert the heterogeneous graph data into a suitable format for sampling - # (CSC format). - # Returns dictionaries holding `colptr` and `row` indices as well as edge - # permutations for each edge type, respectively. - colptr_dict, row_dict, perm_dict = {}, {}, {} - - for edge_type, store in data.edge_items(): - src_node_time = (node_time_dict or {}).get(edge_type[0], None) - out = to_csc(store, device, share_memory, is_sorted, src_node_time) - colptr_dict[edge_type], row_dict[edge_type], perm_dict[edge_type] = out - - return colptr_dict, row_dict, perm_dict - - -############################################################################### - -X, Y = TypeVar('X'), TypeVar('Y') - - -def remap_keys( - inputs: Dict[X, Any], - mapping: Dict[X, Y], - exclude: Optional[List[X]] = None, -) -> Dict[Union[X, Y], Any]: - exclude = exclude or [] - return {(k if k in exclude else mapping.get(k, k)): v - for k, v in inputs.items()} diff --git a/pytorch_geometric-2.3.1/torch_geometric/testing/__init__.py b/pytorch_geometric-2.3.1/torch_geometric/testing/__init__.py deleted file mode 100644 index 8b3aa9f..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/testing/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -from .decorators import ( - is_full_test, - onlyFullTest, - onlyLinux, - onlyPython, - onlyCUDA, - onlyGraphviz, - withPackage, - withCUDA, - disableExtensions, -) -from .feature_store import MyFeatureStore -from .graph_store import MyGraphStore -from .data import FakeHeteroDataset, get_random_edge_index - -__all__ = [ - 'is_full_test', - 'onlyFullTest', - 'onlyLinux', - 'onlyPython', - 'onlyCUDA', - 'onlyGraphviz', - 'withPackage', - 'withCUDA', - 'disableExtensions', - 'MyFeatureStore', - 'MyGraphStore', - 'get_random_edge_index', - 'FakeHeteroDataset', -] diff --git a/pytorch_geometric-2.3.1/torch_geometric/testing/data.py b/pytorch_geometric-2.3.1/torch_geometric/testing/data.py deleted file mode 100644 index 9e8c776..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/testing/data.py +++ /dev/null @@ -1,53 +0,0 @@ -from typing import Callable, Optional - -import torch -from torch import Tensor - -from torch_geometric.data import HeteroData, InMemoryDataset - - -def get_random_edge_index( - num_src_nodes: int, - num_dst_nodes: int, - num_edges: int, - dtype: Optional[torch.dtype] = None, - device: Optional[torch.device] = None, -) -> Tensor: - row = torch.randint(num_src_nodes, (num_edges, ), dtype=dtype, - device=device) - col = 
torch.randint(num_dst_nodes, (num_edges, ), dtype=dtype, - device=device) - return torch.stack([row, col], dim=0) - - -class FakeHeteroDataset(InMemoryDataset): - def __init__(self, transform: Optional[Callable] = None): - super().__init__(transform=transform) - - data = HeteroData() - - num_papers = 100 - num_authors = 10 - - data['paper'].x = torch.randn(num_papers, 16) - data['author'].x = torch.randn(num_authors, 8) - - edge_index = get_random_edge_index( - num_src_nodes=num_papers, - num_dst_nodes=num_authors, - num_edges=300, - ) - data['paper', 'author'].edge_index = edge_index - data['author', 'paper'].edge_index = edge_index.flip([0]) - - data['paper'].y = torch.randint(0, 4, (num_papers, )) - - perm = torch.randperm(num_papers) - data['paper'].train_mask = torch.zeros(num_papers, dtype=torch.bool) - data['paper'].train_mask[perm[0:60]] = True - data['paper'].val_mask = torch.zeros(num_papers, dtype=torch.bool) - data['paper'].val_mask[perm[60:80]] = True - data['paper'].test_mask = torch.zeros(num_papers, dtype=torch.bool) - data['paper'].test_mask[perm[80:100]] = True - - self.data, self.slices = self.collate([data]) diff --git a/pytorch_geometric-2.3.1/torch_geometric/testing/decorators.py b/pytorch_geometric-2.3.1/torch_geometric/testing/decorators.py deleted file mode 100644 index e7ceb3f..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/testing/decorators.py +++ /dev/null @@ -1,119 +0,0 @@ -import os -import sys -from importlib import import_module -from importlib.util import find_spec -from typing import Callable - -import torch -from packaging.requirements import Requirement - -from torch_geometric.visualization.graph import has_graphviz - - -def is_full_test() -> bool: - r"""Whether to run the full but time-consuming test suite.""" - return os.getenv('FULL_TEST', '0') == '1' - - -def onlyFullTest(func: Callable) -> Callable: - r"""A decorator to specify that this function belongs to the full test - suite.""" - import pytest - return pytest.mark.skipif( - not is_full_test(), - reason="Fast test run", - )(func) - - -def onlyLinux(func: Callable) -> Callable: - r"""A decorator to specify that this function should only execute on - Linux systems.""" - import pytest - return pytest.mark.skipif( - sys.platform != 'linux', - reason="No Linux system", - )(func) - - -def onlyPython(*args) -> Callable: - r"""A decorator to skip tests for any Python version not listed.""" - def decorator(func: Callable) -> Callable: - import pytest - - python_version = f'{sys.version_info.major}.{sys.version_info.minor}' - return pytest.mark.skipif( - python_version not in args, - reason=f"Python {python_version} not supported", - )(func) - - return decorator - - -def onlyCUDA(func: Callable) -> Callable: - r"""A decorator to skip tests if CUDA is not found.""" - import pytest - return pytest.mark.skipif( - not torch.cuda.is_available(), - reason="CUDA not available", - )(func) - - -def onlyGraphviz(func: Callable) -> Callable: - r"""A decorator to specify that this function should only execute in case - :obj:`graphviz` is installed.""" - import pytest - return pytest.mark.skipif( - not has_graphviz(), - reason="Graphviz not installed", - )(func) - - -def withPackage(*args) -> Callable: - r"""A decorator to skip tests if certain packages are not installed. 
- Also supports version specification.""" - def is_installed(package: str) -> bool: - req = Requirement(package) - if find_spec(req.name) is None: - return False - module = import_module(req.name) - if not hasattr(module, '__version__'): - return True - - version = module.__version__ - # `req.specifier` does not support `.dev` suffixes, e.g., for - # `pyg_lib==0.1.0.dev*`, so we manually drop them: - if '.dev' in version: - version = '.'.join(version.split('.dev')[:-1]) - - return version in req.specifier - - na_packages = set(package for package in args if not is_installed(package)) - - def decorator(func: Callable) -> Callable: - import pytest - return pytest.mark.skipif( - len(na_packages) > 0, - reason=f"Package(s) {na_packages} are not installed", - )(func) - - return decorator - - -def withCUDA(func: Callable): - r"""A decorator to test both on CPU and CUDA (if available).""" - import pytest - - devices = [torch.device('cpu')] - if torch.cuda.is_available(): - devices.append(torch.device('cuda:0')) - - return pytest.mark.parametrize('device', devices)(func) - - -def disableExtensions(func: Callable): - r"""A decorator to temporarily disable the usage of the - :obj:`torch_scatter`, :obj:`torch_sparse` and :obj:`pyg_lib` extension - packages.""" - import pytest - - return pytest.mark.usefixtures('disable_extensions')(func) diff --git a/pytorch_geometric-2.3.1/torch_geometric/testing/feature_store.py b/pytorch_geometric-2.3.1/torch_geometric/testing/feature_store.py deleted file mode 100644 index 55fa633..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/testing/feature_store.py +++ /dev/null @@ -1,60 +0,0 @@ -from typing import Dict, List, Optional, Tuple - -import torch -from torch import Tensor - -from torch_geometric.data import FeatureStore, TensorAttr -from torch_geometric.typing import FeatureTensorType - - -class MyFeatureStore(FeatureStore): - def __init__(self): - super().__init__() - self.store: Dict[Tuple[str, str], Tensor] = {} - - @staticmethod - def key(attr: TensorAttr) -> Tuple[str, str]: - return (attr.group_name, attr.attr_name) - - def _put_tensor(self, tensor: FeatureTensorType, attr: TensorAttr) -> bool: - index = attr.index - - # None indices define the obvious index: - if index is None: - index = torch.arange(0, tensor.shape[0]) - - # Store the index together with the tensor: - self.store[MyFeatureStore.key(attr)] = (index, tensor) - - return True - - def _get_tensor(self, attr: TensorAttr) -> Optional[FeatureTensorType]: - index, tensor = self.store.get(MyFeatureStore.key(attr), (None, None)) - if tensor is None: - return None - - # None indices return the whole tensor: - if attr.index is None: - return tensor - - # Empty slices return the whole tensor: - if (isinstance(attr.index, slice) - and attr.index == slice(None, None, None)): - return tensor - - idx = (torch.cat([(index == v).nonzero() for v in attr.index]).view(-1) - if attr.index.numel() > 0 else []) - return tensor[idx] - - def _remove_tensor(self, attr: TensorAttr) -> bool: - del self.store[MyFeatureStore.key(attr)] - return True - - def _get_tensor_size(self, attr: TensorAttr) -> Tuple: - return self._get_tensor(attr).size() - - def get_all_tensor_attrs(self) -> List[TensorAttr]: - return [TensorAttr(*key) for key in self.store.keys()] - - def __len__(self): - raise NotImplementedError diff --git a/pytorch_geometric-2.3.1/torch_geometric/transforms/__init__.py b/pytorch_geometric-2.3.1/torch_geometric/transforms/__init__.py deleted file mode 100644 index 3b7f750..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/transforms/__init__.py
+++ /dev/null @@ -1,136 +0,0 @@ -# flake8: noqa - -from .base_transform import BaseTransform -from .compose import Compose -from .to_device import ToDevice -from .to_sparse_tensor import ToSparseTensor -from .constant import Constant -from .normalize_features import NormalizeFeatures -from .svd_feature_reduction import SVDFeatureReduction -from .remove_training_classes import RemoveTrainingClasses -from .random_node_split import RandomNodeSplit -from .random_link_split import RandomLinkSplit -from .mask import IndexToMask, MaskToIndex -from .pad import Pad - -from .to_undirected import ToUndirected -from .one_hot_degree import OneHotDegree -from .target_indegree import TargetIndegree -from .local_degree_profile import LocalDegreeProfile -from .add_self_loops import AddSelfLoops -from .remove_isolated_nodes import RemoveIsolatedNodes -from .remove_duplicated_edges import RemoveDuplicatedEdges -from .knn_graph import KNNGraph -from .radius_graph import RadiusGraph -from .to_dense import ToDense -from .two_hop import TwoHop -from .line_graph import LineGraph -from .laplacian_lambda_max import LaplacianLambdaMax -from .gdc import GDC -from .sign import SIGN -from .gcn_norm import GCNNorm -from .add_metapaths import AddMetaPaths, AddRandomMetaPaths -from .rooted_subgraph import RootedEgoNets, RootedRWSubgraph -from .largest_connected_components import LargestConnectedComponents -from .virtual_node import VirtualNode -from .add_positional_encoding import AddLaplacianEigenvectorPE, AddRandomWalkPE -from .feature_propagation import FeaturePropagation - -from .distance import Distance -from .cartesian import Cartesian -from .local_cartesian import LocalCartesian -from .polar import Polar -from .spherical import Spherical -from .point_pair_features import PointPairFeatures -from .center import Center -from .normalize_rotation import NormalizeRotation -from .normalize_scale import NormalizeScale -from .random_jitter import RandomJitter -from .random_flip import RandomFlip -from .linear_transformation import LinearTransformation -from .random_scale import RandomScale -from .random_rotate import RandomRotate -from .random_shear import RandomShear -from .face_to_edge import FaceToEdge -from .sample_points import SamplePoints -from .fixed_points import FixedPoints -from .generate_mesh_normals import GenerateMeshNormals -from .delaunay import Delaunay -from .to_superpixels import ToSLIC -from .grid_sampling import GridSampling - -general_transforms = [ - 'BaseTransform', - 'Compose', - 'ToDevice', - 'ToSparseTensor', - 'Constant', - 'NormalizeFeatures', - 'SVDFeatureReduction', - 'RemoveTrainingClasses', - 'RandomNodeSplit', - 'RandomLinkSplit', - 'IndexToMask', - 'MaskToIndex', - 'Pad', -] - -graph_transforms = [ - 'ToUndirected', - 'OneHotDegree', - 'TargetIndegree', - 'LocalDegreeProfile', - 'AddSelfLoops', - 'RemoveIsolatedNodes', - 'RemoveDuplicatedEdges', - 'KNNGraph', - 'RadiusGraph', - 'ToDense', - 'TwoHop', - 'LineGraph', - 'LaplacianLambdaMax', - 'GDC', - 'SIGN', - 'GCNNorm', - 'AddMetaPaths', - 'AddRandomMetaPaths', - 'RootedEgoNets', - 'RootedRWSubgraph', - 'LargestConnectedComponents', - 'VirtualNode', - 'AddLaplacianEigenvectorPE', - 'AddRandomWalkPE', - 'FeaturePropagation', -] - -vision_transforms = [ - 'Distance', - 'Cartesian', - 'LocalCartesian', - 'Polar', - 'Spherical', - 'PointPairFeatures', - 'Center', - 'NormalizeRotation', - 'NormalizeScale', - 'RandomJitter', - 'RandomFlip', - 'LinearTransformation', - 'RandomScale', - 'RandomRotate', - 'RandomShear', - 'FaceToEdge', - 
'SamplePoints', - 'FixedPoints', - 'GenerateMeshNormals', - 'Delaunay', - 'ToSLIC', - 'GridSampling', -] - -__all__ = general_transforms + graph_transforms + vision_transforms - -from torch_geometric.deprecation import deprecated - -RandomTranslate = deprecated("use 'transforms.RandomJitter' instead", - 'transforms.RandomTranslate')(RandomJitter) diff --git a/pytorch_geometric-2.3.1/torch_geometric/transforms/add_positional_encoding.py b/pytorch_geometric-2.3.1/torch_geometric/transforms/add_positional_encoding.py deleted file mode 100644 index 6229f0d..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/transforms/add_positional_encoding.py +++ /dev/null @@ -1,140 +0,0 @@ -from typing import Any, Optional - -import numpy as np -import torch - -from torch_geometric.data import Data -from torch_geometric.data.datapipes import functional_transform -from torch_geometric.transforms import BaseTransform -from torch_geometric.typing import SparseTensor -from torch_geometric.utils import ( - get_laplacian, - get_self_loop_attr, - to_scipy_sparse_matrix, -) - - -def add_node_attr(data: Data, value: Any, - attr_name: Optional[str] = None) -> Data: - # TODO Move to `BaseTransform`. - if attr_name is None: - if 'x' in data: - x = data.x.view(-1, 1) if data.x.dim() == 1 else data.x - data.x = torch.cat([x, value.to(x.device, x.dtype)], dim=-1) - else: - data.x = value - else: - data[attr_name] = value - - return data - - -@functional_transform('add_laplacian_eigenvector_pe') -class AddLaplacianEigenvectorPE(BaseTransform): - r"""Adds the Laplacian eigenvector positional encoding from the - `"Benchmarking Graph Neural Networks" `_ - paper to the given graph - (functional name: :obj:`add_laplacian_eigenvector_pe`). - - Args: - k (int): The number of non-trivial eigenvectors to consider. - attr_name (str, optional): The attribute name of the data object to add - positional encodings to. If set to :obj:`None`, will be - concatenated to :obj:`data.x`. - (default: :obj:`"laplacian_eigenvector_pe"`) - is_undirected (bool, optional): If set to :obj:`True`, this transform - expects undirected graphs as input, and can hence speed up the - computation of eigenvectors. (default: :obj:`False`) - **kwargs (optional): Additional arguments of - :meth:`scipy.sparse.linalg.eigs` (when :attr:`is_undirected` is - :obj:`False`) or :meth:`scipy.sparse.linalg.eigsh` (when - :attr:`is_undirected` is :obj:`True`). 
- """ - def __init__( - self, - k: int, - attr_name: Optional[str] = 'laplacian_eigenvector_pe', - is_undirected: bool = False, - **kwargs, - ): - self.k = k - self.attr_name = attr_name - self.is_undirected = is_undirected - self.kwargs = kwargs - - def __call__(self, data: Data) -> Data: - from scipy.sparse.linalg import eigs, eigsh - eig_fn = eigs if not self.is_undirected else eigsh - - num_nodes = data.num_nodes - edge_index, edge_weight = get_laplacian( - data.edge_index, - data.edge_weight, - normalization='sym', - num_nodes=num_nodes, - ) - - L = to_scipy_sparse_matrix(edge_index, edge_weight, num_nodes) - - eig_vals, eig_vecs = eig_fn( - L, - k=self.k + 1, - which='SR' if not self.is_undirected else 'SA', - return_eigenvectors=True, - **self.kwargs, - ) - - eig_vecs = np.real(eig_vecs[:, eig_vals.argsort()]) - pe = torch.from_numpy(eig_vecs[:, 1:self.k + 1]) - sign = -1 + 2 * torch.randint(0, 2, (self.k, )) - pe *= sign - - data = add_node_attr(data, pe, attr_name=self.attr_name) - return data - - -@functional_transform('add_random_walk_pe') -class AddRandomWalkPE(BaseTransform): - r"""Adds the random walk positional encoding from the `"Graph Neural - Networks with Learnable Structural and Positional Representations" - `_ paper to the given graph - (functional name: :obj:`add_random_walk_pe`). - - Args: - walk_length (int): The number of random walk steps. - attr_name (str, optional): The attribute name of the data object to add - positional encodings to. If set to :obj:`None`, will be - concatenated to :obj:`data.x`. - (default: :obj:`"random_walk_pe"`) - """ - def __init__( - self, - walk_length: int, - attr_name: Optional[str] = 'random_walk_pe', - ): - self.walk_length = walk_length - self.attr_name = attr_name - - def __call__(self, data: Data) -> Data: - num_nodes = data.num_nodes - edge_index, edge_weight = data.edge_index, data.edge_weight - - adj = SparseTensor.from_edge_index(edge_index, edge_weight, - sparse_sizes=(num_nodes, num_nodes)) - - # Compute D^{-1} A: - deg_inv = 1.0 / adj.sum(dim=1) - deg_inv[deg_inv == float('inf')] = 0 - adj = adj * deg_inv.view(-1, 1) - - out = adj - row, col, value = out.coo() - pe_list = [get_self_loop_attr((row, col), value, num_nodes)] - for _ in range(self.walk_length - 1): - out = out @ adj - row, col, value = out.coo() - pe_list.append(get_self_loop_attr((row, col), value, num_nodes)) - pe = torch.stack(pe_list, dim=-1) - - data = add_node_attr(data, pe, attr_name=self.attr_name) - return data diff --git a/pytorch_geometric-2.3.1/torch_geometric/transforms/cartesian.py b/pytorch_geometric-2.3.1/torch_geometric/transforms/cartesian.py deleted file mode 100644 index bcd56f0..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/transforms/cartesian.py +++ /dev/null @@ -1,55 +0,0 @@ -from typing import Optional - -import torch - -from torch_geometric.data import Data -from torch_geometric.data.datapipes import functional_transform -from torch_geometric.transforms import BaseTransform - - -@functional_transform('cartesian') -class Cartesian(BaseTransform): - r"""Saves the relative Cartesian coordinates of linked nodes in its edge - attributes (functional name: :obj:`cartesian`). - - Args: - norm (bool, optional): If set to :obj:`False`, the output will not be - normalized to the interval :math:`{[0, 1]}^D`. - (default: :obj:`True`) - max_value (float, optional): If set and :obj:`norm=True`, normalization - will be performed based on this value instead of the maximum value - found in the data. 
(default: :obj:`None`) - cat (bool, optional): If set to :obj:`False`, all existing edge - attributes will be replaced. (default: :obj:`True`) - """ - def __init__( - self, - norm: bool = True, - max_value: Optional[float] = None, - cat: bool = True, - ): - self.norm = norm - self.max = max_value - self.cat = cat - - def __call__(self, data: Data) -> Data: - (row, col), pos, pseudo = data.edge_index, data.pos, data.edge_attr - - cart = pos[row] - pos[col] - cart = cart.view(-1, 1) if cart.dim() == 1 else cart - - if self.norm and cart.numel() > 0: - max_value = cart.abs().max() if self.max is None else self.max - cart = cart / (2 * max_value) + 0.5 - - if pseudo is not None and self.cat: - pseudo = pseudo.view(-1, 1) if pseudo.dim() == 1 else pseudo - data.edge_attr = torch.cat([pseudo, cart.type_as(pseudo)], dim=-1) - else: - data.edge_attr = cart - - return data - - def __repr__(self) -> str: - return (f'{self.__class__.__name__}(norm={self.norm}, ' - f'max_value={self.max})') diff --git a/pytorch_geometric-2.3.1/torch_geometric/transforms/compose.py b/pytorch_geometric-2.3.1/torch_geometric/transforms/compose.py deleted file mode 100644 index 168ac00..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/transforms/compose.py +++ /dev/null @@ -1,29 +0,0 @@ -from typing import Callable, List, Union - -from torch_geometric.data import Data, HeteroData -from torch_geometric.transforms import BaseTransform - - -class Compose(BaseTransform): - """Composes several transforms together. - - Args: - transforms (List[Callable]): List of transforms to compose. - """ - def __init__(self, transforms: List[Callable]): - self.transforms = transforms - - def __call__( - self, - data: Union[Data, HeteroData], - ) -> Union[Data, HeteroData]: - for transform in self.transforms: - if isinstance(data, (list, tuple)): - data = [transform(d) for d in data] - else: - data = transform(data) - return data - - def __repr__(self) -> str: - args = [f' {transform}' for transform in self.transforms] - return '{}([\n{}\n])'.format(self.__class__.__name__, ',\n'.join(args)) diff --git a/pytorch_geometric-2.3.1/torch_geometric/transforms/distance.py b/pytorch_geometric-2.3.1/torch_geometric/transforms/distance.py deleted file mode 100644 index 72741f1..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/transforms/distance.py +++ /dev/null @@ -1,48 +0,0 @@ -from typing import Optional - -import torch - -from torch_geometric.data import Data -from torch_geometric.data.datapipes import functional_transform -from torch_geometric.transforms import BaseTransform - - -@functional_transform('distance') -class Distance(BaseTransform): - r"""Saves the Euclidean distance of linked nodes in its edge attributes - (functional name: :obj:`distance`). - - Args: - norm (bool, optional): If set to :obj:`False`, the output will not be - normalized to the interval :math:`[0, 1]`. (default: :obj:`True`) - max_value (float, optional): If set and :obj:`norm=True`, normalization - will be performed based on this value instead of the maximum value - found in the data. (default: :obj:`None`) - cat (bool, optional): If set to :obj:`False`, all existing edge - attributes will be replaced. 
(default: :obj:`True`) - """ - def __init__(self, norm: bool = True, max_value: Optional[float] = None, - cat: bool = True): - self.norm = norm - self.max = max_value - self.cat = cat - - def __call__(self, data: Data) -> Data: - (row, col), pos, pseudo = data.edge_index, data.pos, data.edge_attr - - dist = torch.norm(pos[col] - pos[row], p=2, dim=-1).view(-1, 1) - - if self.norm and dist.numel() > 0: - dist = dist / (dist.max() if self.max is None else self.max) - - if pseudo is not None and self.cat: - pseudo = pseudo.view(-1, 1) if pseudo.dim() == 1 else pseudo - data.edge_attr = torch.cat([pseudo, dist.type_as(pseudo)], dim=-1) - else: - data.edge_attr = dist - - return data - - def __repr__(self) -> str: - return (f'{self.__class__.__name__}(norm={self.norm}, ' - f'max_value={self.max})') diff --git a/pytorch_geometric-2.3.1/torch_geometric/transforms/local_cartesian.py b/pytorch_geometric-2.3.1/torch_geometric/transforms/local_cartesian.py deleted file mode 100644 index fdc715a..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/transforms/local_cartesian.py +++ /dev/null @@ -1,46 +0,0 @@ -import torch - -from torch_geometric.data import Data -from torch_geometric.data.datapipes import functional_transform -from torch_geometric.transforms import BaseTransform -from torch_geometric.utils import scatter - - -@functional_transform('local_cartesian') -class LocalCartesian(BaseTransform): - r"""Saves the relative Cartesian coordinates of linked nodes in its edge - attributes (functional name: :obj:`local_cartesian`). Each coordinate gets - *neighborhood-normalized* to the interval :math:`{[0, 1]}^D`. - - Args: - norm (bool, optional): If set to :obj:`False`, the output will not be - normalized to the interval :math:`{[0, 1]}^D`. - (default: :obj:`True`) - cat (bool, optional): If set to :obj:`False`, all existing edge - attributes will be replaced. 
(default: :obj:`True`) - """ - def __init__(self, norm: bool = True, cat: bool = True): - self.norm = norm - self.cat = cat - - def __call__(self, data: Data) -> Data: - (row, col), pos, pseudo = data.edge_index, data.pos, data.edge_attr - - cart = pos[row] - pos[col] - cart = cart.view(-1, 1) if cart.dim() == 1 else cart - - max_value = scatter(cart.abs(), col, 0, pos.size(0), reduce='max') - max_value = max_value.max(dim=-1, keepdim=True)[0] - - if self.norm: - cart = cart / (2 * max_value[col]) + 0.5 - else: - cart = cart / max_value[col] - - if pseudo is not None and self.cat: - pseudo = pseudo.view(-1, 1) if pseudo.dim() == 1 else pseudo - data.edge_attr = torch.cat([pseudo, cart.type_as(pseudo)], dim=-1) - else: - data.edge_attr = cart - - return data diff --git a/pytorch_geometric-2.3.1/torch_geometric/transforms/mask.py b/pytorch_geometric-2.3.1/torch_geometric/transforms/mask.py deleted file mode 100644 index 56741b3..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/transforms/mask.py +++ /dev/null @@ -1,128 +0,0 @@ -from typing import List, Optional, Union - -from torch_geometric.data import Data, HeteroData -from torch_geometric.data.datapipes import functional_transform -from torch_geometric.data.storage import BaseStorage -from torch_geometric.transforms import BaseTransform -from torch_geometric.utils import index_to_mask, mask_to_index - -AnyData = Union[Data, HeteroData] - - -def get_attrs_with_suffix( - attrs: Optional[List[str]], - store: BaseStorage, - suffix: str, -) -> List[str]: - if attrs is not None: - return attrs - return [key for key in store.keys() if key.endswith(suffix)] - - -def get_mask_size(attr: str, store: BaseStorage, size: Optional[int]) -> int: - if size is not None: - return size - return store.num_edges if store.is_edge_attr(attr) else store.num_nodes - - -@functional_transform('index_to_mask') -class IndexToMask(BaseTransform): - r"""Converts indices to a mask representation - (functional name: :obj:`index_to_mask`). - - Args: - attrs (str, [str], optional): If given, will only perform index to mask - conversion for the given attributes. If omitted, will infer the - attributes from the suffix :obj:`_index`. (default: :obj:`None`) - sizes (int, [int], optional): The size of the mask. If set to - :obj:`None`, an automatically sized tensor is returned. The number - of nodes will be used by default, except for edge attributes which - will use the number of edges as the mask size. - (default: :obj:`None`) - replace (bool, optional): if set to :obj:`True` replaces the index - attributes with mask tensors. 
(default: :obj:`False`) - """ - def __init__( - self, - attrs: Optional[Union[str, List[str]]] = None, - sizes: Optional[Union[int, List[int]]] = None, - replace: bool = False, - ): - self.attrs = [attrs] if isinstance(attrs, str) else attrs - self.sizes = sizes - self.replace = replace - - def __call__( - self, - data: Union[Data, HeteroData], - ) -> Union[Data, HeteroData]: - for store in data.stores: - attrs = get_attrs_with_suffix(self.attrs, store, '_index') - - sizes = self.sizes or ([None] * len(attrs)) - if isinstance(sizes, int): - sizes = [self.sizes] * len(attrs) - - if len(attrs) != len(sizes): - raise ValueError( - f"The number of attributes (got {len(attrs)}) must match " - f"the number of sizes provided (got {len(sizes)}).") - - for attr, size in zip(attrs, sizes): - if 'edge_index' in attr: - continue - if attr not in store: - continue - size = get_mask_size(attr, store, size) - mask = index_to_mask(store[attr], size=size) - store[f'{attr[:-6]}_mask'] = mask - if self.replace: - del store[attr] - - return data - - def __repr__(self) -> str: - return (f'{self.__class__.__name__}(attrs={self.attrs}, ' - f'sizes={self.sizes}, replace={self.replace})') - - -@functional_transform('mask_to_index') -class MaskToIndex(BaseTransform): - r"""Converts a mask to an index representation - (functional name: :obj:`mask_to_index`). - - Args: - attrs (str, [str], optional): If given, will only perform mask to index - conversion for the given attributes. If omitted, will infer the - attributes from the suffix :obj:`_mask` (default: :obj:`None`) - replace (bool, optional): if set to :obj:`True` replaces the mask - attributes with index tensors. (default: :obj:`False`) - """ - def __init__( - self, - attrs: Optional[Union[str, List[str]]] = None, - replace: bool = False, - ): - self.attrs = [attrs] if isinstance(attrs, str) else attrs - self.replace = replace - - def __call__( - self, - data: Union[Data, HeteroData], - ) -> Union[Data, HeteroData]: - for store in data.stores: - attrs = get_attrs_with_suffix(self.attrs, store, '_mask') - - for attr in attrs: - if attr not in store: - continue - index = mask_to_index(store[attr]) - store[f'{attr[:-5]}_index'] = index - if self.replace: - del store[attr] - - return data - - def __repr__(self) -> str: - return (f'{self.__class__.__name__}(attrs={self.attrs}, ' - f'replace={self.replace})') diff --git a/pytorch_geometric-2.3.1/torch_geometric/transforms/sign.py b/pytorch_geometric-2.3.1/torch_geometric/transforms/sign.py deleted file mode 100644 index 819d6ea..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/transforms/sign.py +++ /dev/null @@ -1,57 +0,0 @@ -import torch - -from torch_geometric.data import Data -from torch_geometric.data.datapipes import functional_transform -from torch_geometric.transforms import BaseTransform -from torch_geometric.typing import SparseTensor - - -@functional_transform('sign') -class SIGN(BaseTransform): - r"""The Scalable Inception Graph Neural Network module (SIGN) from the - `"SIGN: Scalable Inception Graph Neural Networks" - `_ paper (functional name: :obj:`sign`), - which precomputes the fixed representations - - .. math:: - \mathbf{X}^{(i)} = {\left( \mathbf{D}^{-1/2} \mathbf{A} - \mathbf{D}^{-1/2} \right)}^i \mathbf{X} - - for :math:`i \in \{ 1, \ldots, K \}` and saves them in - :obj:`data.x1`, :obj:`data.x2`, ... - - .. note:: - - Since intermediate node representations are pre-computed, this operator - is able to scale well to large graphs via classic mini-batching. 
- For an example of using SIGN, see `examples/sign.py - `_. - - Args: - K (int): The number of hops/layer. - """ - def __init__(self, K: int): - self.K = K - - def __call__(self, data: Data) -> Data: - assert data.edge_index is not None - row, col = data.edge_index - adj_t = SparseTensor(row=col, col=row, - sparse_sizes=(data.num_nodes, data.num_nodes)) - - deg = adj_t.sum(dim=1).to(torch.float) - deg_inv_sqrt = deg.pow(-0.5) - deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0 - adj_t = deg_inv_sqrt.view(-1, 1) * adj_t * deg_inv_sqrt.view(1, -1) - - assert data.x is not None - xs = [data.x] - for i in range(1, self.K + 1): - xs += [adj_t @ xs[-1]] - data[f'x{i}'] = xs[-1] - - return data - - def __repr__(self) -> str: - return f'{self.__class__.__name__}(K={self.K})' diff --git a/pytorch_geometric-2.3.1/torch_geometric/transforms/two_hop.py b/pytorch_geometric-2.3.1/torch_geometric/transforms/two_hop.py deleted file mode 100644 index a05792e..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/transforms/two_hop.py +++ /dev/null @@ -1,35 +0,0 @@ -import torch - -from torch_geometric.data import Data -from torch_geometric.data.datapipes import functional_transform -from torch_geometric.transforms import BaseTransform -from torch_geometric.typing import SparseTensor -from torch_geometric.utils import coalesce, remove_self_loops - - -@functional_transform('two_hop') -class TwoHop(BaseTransform): - r"""Adds the two hop edges to the edge indices - (functional name: :obj:`two_hop`).""" - def __call__(self, data: Data) -> Data: - edge_index, edge_attr = data.edge_index, data.edge_attr - N = data.num_nodes - - adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(N, N)) - - adj = adj @ adj - row, col, _ = adj.coo() - edge_index2 = torch.stack([row, col], dim=0) - edge_index2, _ = remove_self_loops(edge_index2) - - edge_index = torch.cat([edge_index, edge_index2], dim=1) - - if edge_attr is not None: - # We treat newly added edge features as "zero-features": - edge_attr2 = edge_attr.new_zeros(edge_index2.size(1), - *edge_attr.size()[1:]) - edge_attr = torch.cat([edge_attr, edge_attr2], dim=0) - - data.edge_index, data.edge_attr = coalesce(edge_index, edge_attr, N) - - return data diff --git a/pytorch_geometric-2.3.1/torch_geometric/typing.py b/pytorch_geometric-2.3.1/torch_geometric/typing.py deleted file mode 100644 index bb9c74e..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/typing.py +++ /dev/null @@ -1,136 +0,0 @@ -import warnings -from typing import Dict, List, Optional, Tuple, Union - -import numpy as np -import torch -from torch import Tensor - -WITH_PT2 = int(torch.__version__.split('.')[0]) >= 2 - -try: - import pyg_lib # noqa - WITH_PYG_LIB = True - WITH_GMM = WITH_PT2 and hasattr(pyg_lib.ops, 'grouped_matmul') - WITH_SAMPLED_OP = hasattr(pyg_lib.ops, 'sampled_add') - WITH_INDEX_SORT = hasattr(pyg_lib.ops, 'index_sort') -except (ImportError, OSError) as e: - if isinstance(e, OSError): - warnings.warn(f"An issue occurred while importing 'pyg-lib'. " - f"Disabling its usage. Stacktrace: {e}") - pyg_lib = object - WITH_PYG_LIB = False - WITH_GMM = False - WITH_SAMPLED_OP = False - WITH_INDEX_SORT = False - -try: - import torch_scatter # noqa - WITH_TORCH_SCATTER = True -except (ImportError, OSError) as e: - if isinstance(e, OSError): - warnings.warn(f"An issue occurred while importing 'torch-scatter'. " - f"Disabling its usage. 
Stacktrace: {e}") - torch_scatter = object - WITH_TORCH_SCATTER = False - -try: - import torch_sparse # noqa - from torch_sparse import SparseTensor - WITH_TORCH_SPARSE = True -except (ImportError, OSError) as e: - if isinstance(e, OSError): - warnings.warn(f"An issue occurred while importing 'torch-sparse'. " - f"Disabling its usage. Stacktrace: {e}") - torch_sparse = object - WITH_TORCH_SPARSE = False - - class SparseTensor: - def __init__(self, *args, **kwargs): - raise ImportError("'SparseTensor' requires 'torch-sparse'") - - @classmethod - def from_edge_index(cls, *args, **kwargs) -> 'SparseTensor': - raise ImportError("'SparseTensor' requires 'torch-sparse'") - - -# Types for accessing data #################################################### - -# Node-types are denoted by a single string, e.g.: `data['paper']`: -NodeType = str - -# Edge-types are denotes by a triplet of strings, e.g.: -# `data[('author', 'writes', 'paper')] -EdgeType = Tuple[str, str, str] - -DEFAULT_REL = 'to' -EDGE_TYPE_STR_SPLIT = '__' - - -class EdgeTypeStr(str): - r"""A helper class to construct serializable edge types by merging an edge - type tuple into a single string.""" - def __new__(cls, *args): - if isinstance(args[0], (list, tuple)): - # Unwrap `EdgeType((src, rel, dst))` and `EdgeTypeStr((src, dst))`: - args = tuple(args[0]) - - if len(args) == 1 and isinstance(args[0], str): - args = args[0] # An edge type string was passed. - - elif len(args) == 2 and all(isinstance(arg, str) for arg in args): - # A `(src, dst)` edge type was passed - add `DEFAULT_REL`: - args = (args[0], DEFAULT_REL, args[1]) - args = EDGE_TYPE_STR_SPLIT.join(args) - - elif len(args) == 3 and all(isinstance(arg, str) for arg in args): - # A `(src, rel, dst)` edge type was passed: - args = EDGE_TYPE_STR_SPLIT.join(args) - - else: - raise ValueError(f"Encountered invalid edge type '{args}'") - - return str.__new__(cls, args) - - def to_tuple(self) -> EdgeType: - r"""Returns the original edge type.""" - out = tuple(self.split(EDGE_TYPE_STR_SPLIT)) - if len(out) != 3: - raise ValueError(f"Cannot convert the edge type '{self}' to a " - f"tuple since it holds invalid characters") - return out - - -# There exist some short-cuts to query edge-types (given that the full triplet -# can be uniquely reconstructed, e.g.: -# * via str: `data['writes']` -# * via Tuple[str, str]: `data[('author', 'paper')]` -QueryType = Union[NodeType, EdgeType, str, Tuple[str, str]] - -Metadata = Tuple[List[NodeType], List[EdgeType]] - -# A representation of a feature tensor -FeatureTensorType = Union[Tensor, np.ndarray] - -# A representation of an edge index, following the possible formats: -# * COO: (row, col) -# * CSC: (row, colptr) -# * CSR: (rowptr, col) -EdgeTensorType = Tuple[Tensor, Tensor] - -# Types for message passing ################################################### - -Adj = Union[Tensor, SparseTensor] -OptTensor = Optional[Tensor] -PairTensor = Tuple[Tensor, Tensor] -OptPairTensor = Tuple[Tensor, Optional[Tensor]] -PairOptTensor = Tuple[Optional[Tensor], Optional[Tensor]] -Size = Optional[Tuple[int, int]] -NoneType = Optional[Tensor] - -MaybeHeteroNodeTensor = Union[Tensor, Dict[NodeType, Tensor]] -MaybeHeteroEdgeTensor = Union[Tensor, Dict[EdgeType, Tensor]] - -# Types for sampling ########################################################## - -InputNodes = Union[OptTensor, NodeType, Tuple[NodeType, OptTensor]] -InputEdges = Union[OptTensor, EdgeType, Tuple[EdgeType, OptTensor]] diff --git a/pytorch_geometric-2.3.1/torch_geometric/utils/__init__.py 
b/pytorch_geometric-2.3.1/torch_geometric/utils/__init__.py deleted file mode 100644 index d8d7719..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/utils/__init__.py +++ /dev/null @@ -1,137 +0,0 @@ -import copy - -from .scatter import scatter -from .segment import segment -from .sort import index_sort -from .degree import degree -from .softmax import softmax -from .dropout import dropout_adj, dropout_node, dropout_edge, dropout_path -from .augmentation import shuffle_node, mask_feature, add_random_edge -from .sort_edge_index import sort_edge_index -from .coalesce import coalesce -from .undirected import is_undirected, to_undirected -from .loop import (contains_self_loops, remove_self_loops, - segregate_self_loops, add_self_loops, - add_remaining_self_loops, get_self_loop_attr) -from .isolated import contains_isolated_nodes, remove_isolated_nodes -from .subgraph import (get_num_hops, subgraph, k_hop_subgraph, - bipartite_subgraph) -from .homophily import homophily -from .assortativity import assortativity -from .get_laplacian import get_laplacian -from .get_mesh_laplacian import get_mesh_laplacian -from .mask import mask_select, index_to_mask, mask_to_index -from .select import select, narrow -from .to_dense_batch import to_dense_batch -from .to_dense_adj import to_dense_adj -from .nested import to_nested_tensor, from_nested_tensor -from .sparse import (dense_to_sparse, is_sparse, is_torch_sparse_tensor, - to_torch_coo_tensor, to_torch_csr_tensor, - to_torch_csc_tensor, to_edge_index) -from .spmm import spmm -from .unbatch import unbatch, unbatch_edge_index -from .one_hot import one_hot -from .normalized_cut import normalized_cut -from .grid import grid -from .geodesic import geodesic_distance -from .convert import to_scipy_sparse_matrix, from_scipy_sparse_matrix -from .convert import to_networkx, from_networkx -from .convert import to_networkit, from_networkit -from .convert import to_trimesh, from_trimesh -from .convert import to_cugraph, from_cugraph -from .smiles import from_smiles, to_smiles -from .random import (erdos_renyi_graph, stochastic_blockmodel_graph, - barabasi_albert_graph) -from .negative_sampling import (negative_sampling, batched_negative_sampling, - structured_negative_sampling, - structured_negative_sampling_feasible) -from .tree_decomposition import tree_decomposition -from .embedding import get_embeddings -from .trim_to_layer import trim_to_layer -from .train_test_split_edges import train_test_split_edges - -__all__ = [ - 'scatter', - 'segment', - 'index_sort', - 'degree', - 'softmax', - 'dropout_node', - 'dropout_edge', - 'dropout_path', - 'dropout_adj', - 'shuffle_node', - 'mask_feature', - 'add_random_edge', - 'sort_edge_index', - 'coalesce', - 'is_undirected', - 'to_undirected', - 'contains_self_loops', - 'remove_self_loops', - 'segregate_self_loops', - 'add_self_loops', - 'add_remaining_self_loops', - 'get_self_loop_attr', - 'contains_isolated_nodes', - 'remove_isolated_nodes', - 'get_num_hops', - 'subgraph', - 'bipartite_subgraph', - 'k_hop_subgraph', - 'homophily', - 'assortativity', - 'get_laplacian', - 'get_mesh_laplacian', - 'mask_select', - 'index_to_mask', - 'mask_to_index', - 'select', - 'narrow', - 'to_dense_batch', - 'to_dense_adj', - 'to_nested_tensor', - 'from_nested_tensor', - 'dense_to_sparse', - 'is_torch_sparse_tensor', - 'is_sparse', - 'to_torch_coo_tensor', - 'to_torch_csr_tensor', - 'to_torch_csc_tensor', - 'to_edge_index', - 'spmm', - 'unbatch', - 'unbatch_edge_index', - 'one_hot', - 'normalized_cut', - 'grid', - 'geodesic_distance', - 
'to_scipy_sparse_matrix', - 'from_scipy_sparse_matrix', - 'to_networkx', - 'from_networkx', - 'to_networkit', - 'from_networkit', - 'to_trimesh', - 'from_trimesh', - 'to_cugraph', - 'from_cugraph', - 'from_smiles', - 'to_smiles', - 'erdos_renyi_graph', - 'stochastic_blockmodel_graph', - 'barabasi_albert_graph', - 'negative_sampling', - 'batched_negative_sampling', - 'structured_negative_sampling', - 'structured_negative_sampling_feasible', - 'tree_decomposition', - 'get_embeddings', - 'trim_to_layer', - 'train_test_split_edges', -] - -# `structured_negative_sampling_feasible` is a long name and thus destroys the -# documentation rendering. We remove it for now from the documentation: -classes = copy.copy(__all__) -classes.remove('structured_negative_sampling_feasible') diff --git a/pytorch_geometric-2.3.1/torch_geometric/utils/convert.py b/pytorch_geometric-2.3.1/torch_geometric/utils/convert.py deleted file mode 100644 index 78b1b64..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/utils/convert.py +++ /dev/null @@ -1,457 +0,0 @@ -from collections import defaultdict -from typing import Any, Iterable, List, Optional, Tuple, Union - -import scipy.sparse -import torch -from torch import Tensor -from torch.utils.dlpack import from_dlpack, to_dlpack - -import torch_geometric -from torch_geometric.utils.num_nodes import maybe_num_nodes - - -def to_scipy_sparse_matrix( - edge_index: Tensor, - edge_attr: Optional[Tensor] = None, - num_nodes: Optional[int] = None, -) -> scipy.sparse.coo_matrix: - r"""Converts a graph given by edge indices and edge attributes to a scipy - sparse matrix. - - Args: - edge_index (LongTensor): The edge indices. - edge_attr (Tensor, optional): Edge weights or multi-dimensional - edge features. (default: :obj:`None`) - num_nodes (int, optional): The number of nodes, *i.e.* - :obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`) - - Examples: - - >>> edge_index = torch.tensor([ - ... [0, 1, 1, 2, 2, 3], - ... [1, 0, 2, 1, 3, 2], - ... ]) - >>> to_scipy_sparse_matrix(edge_index) - <4x4 sparse matrix of type '<class 'numpy.float32'>' - with 6 stored elements in COOrdinate format> - """ - row, col = edge_index.cpu() - - if edge_attr is None: - edge_attr = torch.ones(row.size(0)) - else: - edge_attr = edge_attr.view(-1).cpu() - assert edge_attr.size(0) == row.size(0) - - N = maybe_num_nodes(edge_index, num_nodes) - out = scipy.sparse.coo_matrix( - (edge_attr.numpy(), (row.numpy(), col.numpy())), (N, N)) - return out - - -def from_scipy_sparse_matrix( - A: scipy.sparse.spmatrix) -> Tuple[Tensor, Tensor]: - r"""Converts a scipy sparse matrix to edge indices and edge attributes. - - Args: - A (scipy.sparse): A sparse matrix. - - Examples: - - >>> edge_index = torch.tensor([ - ... [0, 1, 1, 2, 2, 3], - ... [1, 0, 2, 1, 3, 2], - ...
]) - >>> adj = to_scipy_sparse_matrix(edge_index) - >>> # `edge_index` and `edge_weight` are both returned - >>> from_scipy_sparse_matrix(adj) - (tensor([[0, 1, 1, 2, 2, 3], - [1, 0, 2, 1, 3, 2]]), - tensor([1., 1., 1., 1., 1., 1.])) - """ - A = A.tocoo() - row = torch.from_numpy(A.row).to(torch.long) - col = torch.from_numpy(A.col).to(torch.long) - edge_index = torch.stack([row, col], dim=0) - edge_weight = torch.from_numpy(A.data) - return edge_index, edge_weight - - -def to_networkx( - data: 'torch_geometric.data.Data', - node_attrs: Optional[Iterable[str]] = None, - edge_attrs: Optional[Iterable[str]] = None, - graph_attrs: Optional[Iterable[str]] = None, - to_undirected: Optional[Union[bool, str]] = False, - remove_self_loops: bool = False, -) -> Any: - r"""Converts a :class:`torch_geometric.data.Data` instance to a - :obj:`networkx.Graph` if :attr:`to_undirected` is set to :obj:`True`, or - a directed :obj:`networkx.DiGraph` otherwise. - - Args: - data (torch_geometric.data.Data): The data object. - node_attrs (iterable of str, optional): The node attributes to be - copied. (default: :obj:`None`) - edge_attrs (iterable of str, optional): The edge attributes to be - copied. (default: :obj:`None`) - graph_attrs (iterable of str, optional): The graph attributes to be - copied. (default: :obj:`None`) - to_undirected (bool or str, optional): If set to :obj:`True` or - "upper", will return a :obj:`networkx.Graph` instead of a - :obj:`networkx.DiGraph`. The undirected graph will correspond to - the upper triangle of the corresponding adjacency matrix. - Similarly, if set to "lower", the undirected graph will correspond - to the lower triangle of the adjacency matrix. (default: - :obj:`False`) - remove_self_loops (bool, optional): If set to :obj:`True`, will not - include self loops in the resulting graph. (default: :obj:`False`) - - Examples: - - >>> edge_index = torch.tensor([ - ... [0, 1, 1, 2, 2, 3], - ... [1, 0, 2, 1, 3, 2], - ... ]) - >>> data = Data(edge_index=edge_index, num_nodes=4) - >>> to_networkx(data) - <networkx.classes.digraph.DiGraph at 0x2713fdb40d0> - - """ - import networkx as nx - - G = nx.Graph() if to_undirected else nx.DiGraph() - - G.add_nodes_from(range(data.num_nodes)) - - node_attrs = node_attrs or [] - edge_attrs = edge_attrs or [] - graph_attrs = graph_attrs or [] - - values = {} - for key, value in data(*(node_attrs + edge_attrs + graph_attrs)): - if torch.is_tensor(value): - value = value if value.dim() <= 1 else value.squeeze(-1) - values[key] = value.tolist() - else: - values[key] = value - - to_undirected = "upper" if to_undirected is True else to_undirected - to_undirected_upper = True if to_undirected == "upper" else False - to_undirected_lower = True if to_undirected == "lower" else False - - for i, (u, v) in enumerate(data.edge_index.t().tolist()): - - if to_undirected_upper and u > v: - continue - elif to_undirected_lower and u < v: - continue - - if remove_self_loops and u == v: - continue - - G.add_edge(u, v) - - for key in edge_attrs: - G[u][v][key] = values[key][i] - - for key in node_attrs: - for i, feat_dict in G.nodes(data=True): - feat_dict.update({key: values[key][i]}) - - for key in graph_attrs: - G.graph[key] = values[key] - - return G - - -def from_networkx( - G: Any, - group_node_attrs: Optional[Union[List[str], all]] = None, - group_edge_attrs: Optional[Union[List[str], all]] = None, -) -> 'torch_geometric.data.Data': - r"""Converts a :obj:`networkx.Graph` or :obj:`networkx.DiGraph` to a - :class:`torch_geometric.data.Data` instance.
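For reference, a minimal round trip through the two converters above might look as follows (a sketch; it assumes `networkx` is installed and only uses the graph connectivity, no attributes):

```python
import torch
from torch_geometric.data import Data
from torch_geometric.utils import from_networkx, to_networkx

edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]])
data = Data(edge_index=edge_index, num_nodes=3)

G = to_networkx(data)      # a networkx.DiGraph with 3 nodes and 4 edges
data2 = from_networkx(G)   # back to a `Data` object
assert data2.num_nodes == 3 and data2.edge_index.size(1) == 4
```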
- - Args: - G (networkx.Graph or networkx.DiGraph): A networkx graph. - group_node_attrs (List[str] or all, optional): The node attributes to - be concatenated and added to :obj:`data.x`. (default: :obj:`None`) - group_edge_attrs (List[str] or all, optional): The edge attributes to - be concatenated and added to :obj:`data.edge_attr`. - (default: :obj:`None`) - - .. note:: - - All :attr:`group_node_attrs` and :attr:`group_edge_attrs` values must - be numeric. - - Examples: - - >>> edge_index = torch.tensor([ - ... [0, 1, 1, 2, 2, 3], - ... [1, 0, 2, 1, 3, 2], - ... ]) - >>> data = Data(edge_index=edge_index, num_nodes=4) - >>> g = to_networkx(data) - >>> # A `Data` object is returned - >>> from_networkx(g) - Data(edge_index=[2, 6], num_nodes=4) - """ - import networkx as nx - - from torch_geometric.data import Data - - G = nx.convert_node_labels_to_integers(G) - G = G.to_directed() if not nx.is_directed(G) else G - - if isinstance(G, (nx.MultiGraph, nx.MultiDiGraph)): - edges = list(G.edges(keys=False)) - else: - edges = list(G.edges) - - edge_index = torch.tensor(edges, dtype=torch.long).t().contiguous() - - data = defaultdict(list) - - if G.number_of_nodes() > 0: - node_attrs = list(next(iter(G.nodes(data=True)))[-1].keys()) - else: - node_attrs = {} - - if G.number_of_edges() > 0: - edge_attrs = list(next(iter(G.edges(data=True)))[-1].keys()) - else: - edge_attrs = {} - - for i, (_, feat_dict) in enumerate(G.nodes(data=True)): - if set(feat_dict.keys()) != set(node_attrs): - raise ValueError('Not all nodes contain the same attributes') - for key, value in feat_dict.items(): - data[str(key)].append(value) - - for i, (_, _, feat_dict) in enumerate(G.edges(data=True)): - if set(feat_dict.keys()) != set(edge_attrs): - raise ValueError('Not all edges contain the same attributes') - for key, value in feat_dict.items(): - key = f'edge_{key}' if key in node_attrs else key - data[str(key)].append(value) - - for key, value in G.graph.items(): - key = f'graph_{key}' if key in node_attrs else key - data[str(key)] = value - - for key, value in data.items(): - if isinstance(value, (tuple, list)) and isinstance(value[0], Tensor): - data[key] = torch.stack(value, dim=0) - else: - try: - data[key] = torch.tensor(value) - except (ValueError, TypeError): - pass - - data['edge_index'] = edge_index.view(2, -1) - data = Data.from_dict(data) - - if group_node_attrs is all: - group_node_attrs = list(node_attrs) - if group_node_attrs is not None: - xs = [] - for key in group_node_attrs: - x = data[key] - x = x.view(-1, 1) if x.dim() <= 1 else x - xs.append(x) - del data[key] - data.x = torch.cat(xs, dim=-1) - - if group_edge_attrs is all: - group_edge_attrs = list(edge_attrs) - if group_edge_attrs is not None: - xs = [] - for key in group_edge_attrs: - key = f'edge_{key}' if key in node_attrs else key - x = data[key] - x = x.view(-1, 1) if x.dim() <= 1 else x - xs.append(x) - del data[key] - data.edge_attr = torch.cat(xs, dim=-1) - - if data.x is None and data.pos is None: - data.num_nodes = G.number_of_nodes() - - return data - - -def to_networkit( - edge_index: Tensor, - edge_weight: Optional[Tensor] = None, - num_nodes: Optional[int] = None, - directed: bool = True, -) -> Any: - r"""Converts a :obj:`(edge_index, edge_weight)` tuple to a - :class:`networkit.Graph`. - - Args: - edge_index (torch.Tensor): The edge indices of the graph. - edge_weight (torch.Tensor, optional): The edge weights of the graph. - (default: :obj:`None`) - num_nodes (int, optional): The number of nodes in the graph. 
- (default: :obj:`None`) - directed (bool, optional): If set to :obj:`False`, the graph will be - undirected. (default: :obj:`True`) - """ - import networkit as nk - - num_nodes = maybe_num_nodes(edge_index, num_nodes) - - g = nk.graph.Graph( - num_nodes, - weighted=edge_weight is not None, - directed=directed, - ) - - if edge_weight is None: - edge_weight = torch.ones(edge_index.size(1)) - - if not directed: - mask = edge_index[0] <= edge_index[1] - edge_index = edge_index[:, mask] - edge_weight = edge_weight[mask] - - for (u, v), w in zip(edge_index.t().tolist(), edge_weight.tolist()): - g.addEdge(u, v, w) - - return g - - -def from_networkit(g: Any) -> Tuple[Tensor, Optional[Tensor]]: - r"""Converts a :class:`networkit.Graph` to a - :obj:`(edge_index, edge_weight)` tuple. - If the :class:`networkit.Graph` is not weighted, the returned - :obj:`edge_weight` will be :obj:`None`. - - Args: - g (networkit.graph.Graph): A :obj:`networkit` graph object. - """ - is_directed = g.isDirected() - is_weighted = g.isWeighted() - - edge_indices, edge_weights = [], [] - for u, v, w in g.iterEdgesWeights(): - edge_indices.append([u, v]) - edge_weights.append(w) - if not is_directed: - edge_indices.append([v, u]) - edge_weights.append(w) - - edge_index = torch.tensor(edge_indices).t().contiguous() - edge_weight = torch.tensor(edge_weights) if is_weighted else None - - return edge_index, edge_weight - - -def to_trimesh(data): - r"""Converts a :class:`torch_geometric.data.Data` instance to a - :obj:`trimesh.Trimesh`. - - Args: - data (torch_geometric.data.Data): The data object. - - Example: - - >>> pos = torch.tensor([[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0]], - ... dtype=torch.float) - >>> face = torch.tensor([[0, 1, 2], [1, 2, 3]]).t() - - >>> data = Data(pos=pos, face=face) - >>> to_trimesh(data) - <trimesh.Trimesh(vertices.shape=(4, 3), faces.shape=(2, 3))> - """ - import trimesh - return trimesh.Trimesh(vertices=data.pos.detach().cpu().numpy(), - faces=data.face.detach().t().cpu().numpy(), - process=False) - - -def from_trimesh(mesh): - r"""Converts a :obj:`trimesh.Trimesh` to a - :class:`torch_geometric.data.Data` instance. - - Args: - mesh (trimesh.Trimesh): A :obj:`trimesh` mesh. - - Example: - - >>> pos = torch.tensor([[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0]], - ... dtype=torch.float) - >>> face = torch.tensor([[0, 1, 2], [1, 2, 3]]).t() - - >>> data = Data(pos=pos, face=face) - >>> mesh = to_trimesh(data) - >>> from_trimesh(mesh) - Data(pos=[4, 3], face=[3, 2]) - """ - from torch_geometric.data import Data - - pos = torch.from_numpy(mesh.vertices).to(torch.float) - face = torch.from_numpy(mesh.faces).t().contiguous() - - return Data(pos=pos, face=face) - - -def to_cugraph(edge_index: Tensor, edge_weight: Optional[Tensor] = None, - relabel_nodes: bool = True, directed: bool = True): - r"""Converts a graph given by :obj:`edge_index` and optional - :obj:`edge_weight` into a :obj:`cugraph` graph object. - - Args: - edge_index (torch.Tensor): The edge indices of the graph. - edge_weight (torch.Tensor, optional): The edge weights of the graph. - (default: :obj:`None`) - relabel_nodes (bool, optional): If set to :obj:`True`, - :obj:`cugraph` will remove any isolated nodes, leading to a - relabeling of nodes. (default: :obj:`True`) - directed (bool, optional): If set to :obj:`False`, the graph will be - undirected.
(default: :obj:`True`) - """ - import cudf - import cugraph - - g = cugraph.Graph(directed=directed) - df = cudf.from_dlpack(to_dlpack(edge_index.t())) - - if edge_weight is not None: - assert edge_weight.dim() == 1 - df['2'] = cudf.from_dlpack(to_dlpack(edge_weight)) - - g.from_cudf_edgelist( - df, - source=0, - destination=1, - edge_attr='2' if edge_weight is not None else None, - renumber=relabel_nodes, - ) - - return g - - -def from_cugraph(g: Any) -> Tuple[Tensor, Optional[Tensor]]: - r"""Converts a :obj:`cugraph` graph object into :obj:`edge_index` and - optional :obj:`edge_weight` tensors. - - Args: - g (cugraph.Graph): A :obj:`cugraph` graph object. - """ - df = g.view_edge_list() - - src = from_dlpack(df['src'].to_dlpack()).long() - dst = from_dlpack(df['dst'].to_dlpack()).long() - edge_index = torch.stack([src, dst], dim=0) - - edge_weight = None - if 'weights' in df: - edge_weight = from_dlpack(df['weights'].to_dlpack()) - - return edge_index, edge_weight diff --git a/pytorch_geometric-2.3.1/torch_geometric/utils/geodesic.py b/pytorch_geometric-2.3.1/torch_geometric/utils/geodesic.py deleted file mode 100644 index bea66bd..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/utils/geodesic.py +++ /dev/null @@ -1,114 +0,0 @@ -import multiprocessing as mp -from typing import Optional - -import numpy as np -import torch -from torch import Tensor - - -def geodesic_distance( - pos: Tensor, - face: Tensor, - src: Optional[Tensor] = None, - dest: Optional[Tensor] = None, - norm: bool = True, - max_distance: Optional[float] = None, - num_workers: int = 0, -) -> Tensor: - r"""Computes (normalized) geodesic distances of a mesh given by :obj:`pos` - and :obj:`face`. If :obj:`src` and :obj:`dest` are given, this method only - computes the geodesic distances for the respective source and target - node-pairs. - - .. note:: - - This function requires the :obj:`gdist` package. - To install, run :obj:`pip install cython && pip install gdist`. - - Args: - pos (Tensor): The node positions. - face (LongTensor): The face indices. - src (LongTensor, optional): If given, only compute geodesic distances - for the specified source indices. (default: :obj:`None`) - dest (LongTensor, optional): If given, only compute geodesic distances - for the specified target indices. (default: :obj:`None`) - norm (bool, optional): Normalizes geodesic distances by - :math:`\sqrt{\textrm{area}(\mathcal{M})}`. (default: :obj:`True`) - max_distance (float, optional): If given, only yields results for - geodesic distances less than :obj:`max_distance`. This will speed - up runtime dramatically. (default: :obj:`None`) - num_workers (int, optional): How many subprocesses to use for - calculating geodesic distances. - :obj:`0` means that computation takes place in the main process. - :obj:`-1` means that the available amount of CPU cores is used. - (default: :obj:`0`) - - :rtype: :class:`Tensor` - - Example: - - >>> pos = torch.Tensor([[0, 0, 0], - ... [2, 0, 0], - ... [0, 2, 0], - ... [2, 2, 0]]) - >>> face = torch.tensor([[0, 0], - ... [1, 2], - ... 
[3, 3]]) - >>> geodesic_distance(pos, face) - [[0, 1, 1, 1.4142135623730951], - [1, 0, 1.4142135623730951, 1], - [1, 1.4142135623730951, 0, 1], - [1.4142135623730951, 1, 1, 0]] - """ - import gdist - - max_distance = float('inf') if max_distance is None else max_distance - - if norm: - area = (pos[face[1]] - pos[face[0]]).cross(pos[face[2]] - pos[face[0]]) - norm = (area.norm(p=2, dim=1) / 2).sum().sqrt().item() - else: - norm = 1.0 - - dtype = pos.dtype - - pos = pos.detach().cpu().to(torch.double).numpy() - face = face.detach().t().cpu().to(torch.int).numpy() - - if src is None and dest is None: - out = gdist.local_gdist_matrix(pos, face, - max_distance * norm).toarray() / norm - return torch.from_numpy(out).to(dtype) - - if src is None: - src = np.arange(pos.shape[0], dtype=np.int32) - else: - src = src.detach().cpu().to(torch.int).numpy() - - dest = None if dest is None else dest.detach().cpu().to(torch.int).numpy() - - def _parallel_loop(pos, face, src, dest, max_distance, norm, i, dtype): - s = src[i:i + 1] - d = None if dest is None else dest[i:i + 1] - out = gdist.compute_gdist(pos, face, s, d, max_distance * norm) / norm - return torch.from_numpy(out).to(dtype) - - num_workers = mp.cpu_count() if num_workers <= -1 else num_workers - if num_workers > 0: - with mp.Pool(num_workers) as pool: - outs = pool.starmap( - _parallel_loop, - [(pos, face, src, dest, max_distance, norm, i, dtype) - for i in range(len(src))]) - else: - outs = [ - _parallel_loop(pos, face, src, dest, max_distance, norm, i, dtype) - for i in range(len(src)) - ] - - out = torch.cat(outs, dim=0) - - if dest is None: - out = out.view(-1, pos.shape[0]) - - return out diff --git a/pytorch_geometric-2.3.1/torch_geometric/utils/hetero.py b/pytorch_geometric-2.3.1/torch_geometric/utils/hetero.py deleted file mode 100644 index 103bc62..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/utils/hetero.py +++ /dev/null @@ -1,57 +0,0 @@ -from typing import List, Set - -import torch - -from torch_geometric.typing import EdgeType, NodeType -from torch_geometric.utils.num_nodes import maybe_num_nodes_dict - - -def group_hetero_graph(edge_index_dict, num_nodes_dict=None): - num_nodes_dict = maybe_num_nodes_dict(edge_index_dict, num_nodes_dict) - - tmp = list(edge_index_dict.values())[0] - - key2int = {} - - cumsum, offset = 0, {} # Helper data. 
- node_types, local_node_indices = [], [] - local2global = {} - for i, (key, N) in enumerate(num_nodes_dict.items()): - key2int[key] = i - node_types.append(tmp.new_full((N, ), i)) - local_node_indices.append(torch.arange(N, device=tmp.device)) - offset[key] = cumsum - local2global[key] = local_node_indices[-1] + cumsum - local2global[i] = local2global[key] - cumsum += N - - node_type = torch.cat(node_types, dim=0) - local_node_idx = torch.cat(local_node_indices, dim=0) - - edge_indices, edge_types = [], [] - for i, (keys, edge_index) in enumerate(edge_index_dict.items()): - key2int[keys] = i - inc = torch.tensor([offset[keys[0]], offset[keys[-1]]]).view(2, 1) - edge_indices.append(edge_index + inc.to(tmp.device)) - edge_types.append(tmp.new_full((edge_index.size(1), ), i)) - - edge_index = torch.cat(edge_indices, dim=-1) - edge_type = torch.cat(edge_types, dim=0) - - return (edge_index, edge_type, node_type, local_node_idx, local2global, - key2int) - - -def get_unused_node_types(node_types: List[NodeType], - edge_types: List[EdgeType]) -> Set[NodeType]: - dst_node_types = set(edge_type[-1] for edge_type in edge_types) - return set(node_types) - set(dst_node_types) - - -def check_add_self_loops(module: torch.nn.Module, edge_types: List[EdgeType]): - is_bipartite = any([key[0] != key[-1] for key in edge_types]) - if is_bipartite and getattr(module, 'add_self_loops', False): - raise ValueError( - f"'add_self_loops' attribute set to 'True' on module '{module}' " - f"for use with edge type(s) '{edge_types}'. This will lead to " - f"incorrect message passing results.") diff --git a/pytorch_geometric-2.3.1/torch_geometric/utils/mask.py b/pytorch_geometric-2.3.1/torch_geometric/utils/mask.py deleted file mode 100644 index 95ab71c..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/utils/mask.py +++ /dev/null @@ -1,69 +0,0 @@ -from typing import Optional - -import torch -from torch import Tensor - - -def mask_select(src: Tensor, dim: int, mask: Tensor) -> Tensor: - r"""Returns a new tensor which masks the :obj:`src` tensor along the - dimension :obj:`dim` according to the boolean mask :obj:`mask`. - - Args: - src (torch.Tensor): The input tensor. - dim (int): The dimension in which to mask. - mask (torch.BoolTensor): The 1-D tensor containing the binary mask to - index with. - """ - assert mask.dim() == 1 - assert src.size(dim) == mask.numel() - dim = dim + src.dim() if dim < 0 else dim - assert dim >= 0 and dim < src.dim() - - size = [1] * src.dim() - size[dim] = mask.numel() - - out = src.masked_select(mask.view(size)) - - size = list(src.size()) - size[dim] = -1 - - return out.view(size) - - -def index_to_mask(index: Tensor, size: Optional[int] = None) -> Tensor: - r"""Converts indices to a mask representation. - - Args: - index (Tensor): The indices. - size (int, optional): The size of the mask. If set to :obj:`None`, a - minimal sized output mask is returned. - - Example: - - >>> index = torch.tensor([1, 3, 5]) - >>> index_to_mask(index) - tensor([False, True, False, True, False, True]) - - >>> index_to_mask(index, size=7) - tensor([False, True, False, True, False, True, False]) - """ - index = index.view(-1) - size = int(index.max()) + 1 if size is None else size - mask = index.new_zeros(size, dtype=torch.bool) - mask[index] = True - return mask - - -def mask_to_index(mask: Tensor) -> Tensor: - r"""Converts a mask to an index representation. - - Args: - mask (Tensor): The mask.
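The two mask helpers above are inverses of each other; a small runnable sketch (names as exported from `torch_geometric.utils` in this version):

```python
import torch
from torch_geometric.utils import index_to_mask, mask_to_index

index = torch.tensor([1, 3, 5])
mask = index_to_mask(index, size=7)
# mask: tensor([False, True, False, True, False, True, False])
assert mask_to_index(mask).equal(index)
```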
- - Example: - - >>> mask = torch.tensor([False, True, False]) - >>> mask_to_index(mask) - tensor([1]) - """ - return mask.nonzero(as_tuple=False).view(-1) diff --git a/pytorch_geometric-2.3.1/torch_geometric/utils/num_nodes.py b/pytorch_geometric-2.3.1/torch_geometric/utils/num_nodes.py deleted file mode 100644 index d81e356..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/utils/num_nodes.py +++ /dev/null @@ -1,51 +0,0 @@ -from copy import copy -from typing import Optional # noqa - -import torch -from torch import Tensor - -from torch_geometric.typing import SparseTensor # noqa -from torch_geometric.utils.sparse import is_torch_sparse_tensor - - -@torch.jit._overload -def maybe_num_nodes(edge_index, num_nodes=None): - # type: (Tensor, Optional[int]) -> int - pass - - -@torch.jit._overload -def maybe_num_nodes(edge_index, num_nodes=None): - # type: (SparseTensor, Optional[int]) -> int - pass - - -def maybe_num_nodes(edge_index, num_nodes=None): - if num_nodes is not None: - return num_nodes - elif isinstance(edge_index, Tensor): - if is_torch_sparse_tensor(edge_index): - return max(edge_index.size(0), edge_index.size(1)) - return int(edge_index.max()) + 1 if edge_index.numel() > 0 else 0 - else: - return max(edge_index.size(0), edge_index.size(1)) - - -def maybe_num_nodes_dict(edge_index_dict, num_nodes_dict=None): - num_nodes_dict = {} if num_nodes_dict is None else copy(num_nodes_dict) - - found_types = list(num_nodes_dict.keys()) - - for keys, edge_index in edge_index_dict.items(): - - key = keys[0] - if key not in found_types: - N = int(edge_index[0].max() + 1) - num_nodes_dict[key] = max(N, num_nodes_dict.get(key, N)) - - key = keys[-1] - if key not in found_types: - N = int(edge_index[1].max() + 1) - num_nodes_dict[key] = max(N, num_nodes_dict.get(key, N)) - - return num_nodes_dict diff --git a/pytorch_geometric-2.3.1/torch_geometric/utils/repeat.py b/pytorch_geometric-2.3.1/torch_geometric/utils/repeat.py deleted file mode 100644 index 0a93f74..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/utils/repeat.py +++ /dev/null @@ -1,14 +0,0 @@ -import itertools -import numbers - - -def repeat(src, length): - if src is None: - return None - if isinstance(src, numbers.Number): - return list(itertools.repeat(src, length)) - if (len(src) > length): - return src[:length] - if (len(src) < length): - return src + list(itertools.repeat(src[-1], length - len(src))) - return src diff --git a/pytorch_geometric-2.3.1/torch_geometric/utils/scatter.py b/pytorch_geometric-2.3.1/torch_geometric/utils/scatter.py deleted file mode 100644 index a75f1fe..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/utils/scatter.py +++ /dev/null @@ -1,151 +0,0 @@ -import warnings -from typing import Optional - -import torch -from torch import Tensor - -import torch_geometric.typing -from torch_geometric.typing import torch_scatter - -major, minor, _ = torch.__version__.split('.', maxsplit=2) -major, minor = int(major), int(minor) -has_pytorch112 = major > 1 or (major == 1 and minor >= 12) - -if has_pytorch112: # pragma: no cover - - warnings.filterwarnings('ignore', '.*is in beta and the API may change.*') - - def broadcast(src: Tensor, ref: Tensor, dim: int) -> Tensor: - size = [1] * ref.dim() - size[dim] = -1 - return src.view(size).expand_as(ref) - - def scatter(src: Tensor, index: Tensor, dim: int = 0, - dim_size: Optional[int] = None, reduce: str = 'sum') -> Tensor: - r"""Reduces all values from the :obj:`src` tensor at the indices - specified in the :obj:`index` tensor along a given dimension - 
:obj:`dim`. See the `documentation - `__ of the :obj:`torch_scatter` package for more - information. - - Args: - src (torch.Tensor): The source tensor. - index (torch.Tensor): The index tensor. - dim (int, optional): The dimension along which to index. - (default: :obj:`0`) - dim_size (int, optional): The size of the output tensor at - dimension :obj:`dim`. If set to :obj:`None`, will create a - minimal-sized output tensor according to - :obj:`index.max() + 1`. (default: :obj:`None`) - reduce (str, optional): The reduce operation (:obj:`"sum"`, - :obj:`"mean"`, :obj:`"mul"`, :obj:`"min"` or :obj:`"max"`). - (default: :obj:`"sum"`) - """ - if index.dim() != 1: - raise ValueError(f"The `index` argument must be one-dimensional " - f"(got {index.dim()} dimensions)") - - dim = src.dim() + dim if dim < 0 else dim - - if dim < 0 or dim >= src.dim(): - raise ValueError(f"The `dim` argument must lay between 0 and " - f"{src.dim() - 1} (got {dim})") - - if dim_size is None: - dim_size = int(index.max()) + 1 if index.numel() > 0 else 0 - - # For now, we maintain various different code paths, based on whether - # the input requires gradients and whether it lays on the CPU/GPU. - # For example, `torch_scatter` is usually faster than - # `torch.scatter_reduce` on GPU, while `torch.scatter_reduce` is faster - # on CPU. - # `torch.scatter_reduce` has a faster forward implementation for - # "min"/"max" reductions since it does not compute additional arg - # indices, but is therefore way slower in its backward implementation. - # More insights can be found in `test/utils/test_scatter.py`. - - size = list(src.size()) - size[dim] = dim_size - - # For "sum" and "mean" reduction, we make use of `scatter_add_`: - if reduce == 'sum' or reduce == 'add': - index = broadcast(index, src, dim) - return src.new_zeros(size).scatter_add_(dim, index, src) - - if reduce == 'mean': - count = src.new_zeros(dim_size) - count.scatter_add_(0, index, src.new_ones(src.size(dim))) - count = count.clamp(min=1) - - index = broadcast(index, src, dim) - out = src.new_zeros(size).scatter_add_(dim, index, src) - - return out / broadcast(count, out, dim) - - # For "min" and "max" reduction, we prefer `scatter_reduce_` on CPU or - # in case the input does not require gradients: - if reduce == 'min' or reduce == 'max': - if (not torch_geometric.typing.WITH_TORCH_SCATTER - or not src.is_cuda or not src.requires_grad): - - if src.is_cuda and src.requires_grad: - warnings.warn(f"The usage of `scatter(reduce='{reduce}')` " - f"can be accelerated via the 'torch-scatter'" - f" package, but it was not found") - - index = broadcast(index, src, dim) - return src.new_zeros(size).scatter_reduce_( - dim, index, src, reduce=f'a{reduce}', include_self=False) - - return torch_scatter.scatter(src, index, dim, dim_size=dim_size, - reduce=reduce) - - # For "mul" reduction, we prefer `scatter_reduce_` on CPU: - if reduce == 'mul': - if (not torch_geometric.typing.WITH_TORCH_SCATTER - or not src.is_cuda): - - if src.is_cuda: - warnings.warn(f"The usage of `scatter(reduce='{reduce}')` " - f"can be accelerated via the 'torch-scatter'" - f" package, but it was not found") - - index = broadcast(index, src, dim) - # We initialize with `one` here to match `scatter_mul` output: - return src.new_ones(size).scatter_reduce_( - dim, index, src, reduce='prod', include_self=True) - - return torch_scatter.scatter(src, index, dim, dim_size=dim_size, - reduce='mul') - - raise ValueError(f"Encountered invalid `reduce` argument '{reduce}'") - -else: - - def scatter(src: 
Tensor, index: Tensor, dim: int = 0, - dim_size: Optional[int] = None, reduce: str = 'sum') -> Tensor: - r"""Reduces all values from the :obj:`src` tensor at the indices - specified in the :obj:`index` tensor along a given dimension - :obj:`dim`. See the `documentation - `_ of the :obj:`torch_scatter` package for more - information. - - Args: - src (torch.Tensor): The source tensor. - index (torch.Tensor): The index tensor. - dim (int, optional): The dimension along which to index. - (default: :obj:`0`) - dim_size (int, optional): The size of the output tensor at - dimension :obj:`dim`. If set to :obj:`None`, will create a - minimal-sized output tensor according to - :obj:`index.max() + 1`. (default: :obj:`None`) - reduce (str, optional): The reduce operation (:obj:`"sum"`, - :obj:`"mean"`, :obj:`"mul"`, :obj:`"min"` or :obj:`"max"`). - (default: :obj:`"sum"`) - """ - if not torch_geometric.typing.WITH_TORCH_SCATTER: - raise ImportError("'scatter' requires the 'torch-scatter' package") - return torch_scatter.scatter(src, index, dim, dim_size=dim_size, - reduce=reduce) diff --git a/pytorch_geometric-2.3.1/torch_geometric/utils/select.py b/pytorch_geometric-2.3.1/torch_geometric/utils/select.py deleted file mode 100644 index 210b127..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/utils/select.py +++ /dev/null @@ -1,52 +0,0 @@ -from typing import Any, List, Union - -import torch -from torch import Tensor - -from torch_geometric.utils.mask import mask_select - - -def select(src: Union[Tensor, List[Any]], index_or_mask: Tensor, - dim: int) -> Union[Tensor, List[Any]]: - r"""Selects the input tensor or input list according to a given index or - mask vector. - - Args: - src (torch.Tensor or list): The input tensor or list. - index_or_mask (torch.Tensor): The index or mask vector. - dim (int): The dimension along which to select. - """ - if isinstance(src, Tensor): - if index_or_mask.dtype == torch.bool: - return mask_select(src, dim, index_or_mask) - return src.index_select(dim, index_or_mask) - - if isinstance(src, (tuple, list)): - if dim != 0: - raise ValueError("Cannot select along dimension other than 0") - if index_or_mask.dtype == torch.bool: - return [src[i] for i, m in enumerate(index_or_mask) if m] - return [src[i] for i in index_or_mask] - - raise ValueError(f"Encountered invalid input type (got '{type(src)}')") - - -def narrow(src: Union[Tensor, List[Any]], dim: int, start: int, - length: int) -> Union[Tensor, List[Any]]: - r"""Narrows the input tensor or input list to the specified range. - - Args: - src (torch.Tensor or list): The input tensor or list. - dim (int): The dimension along which to narrow. - start (int): The starting dimension. - length (int): The distance to the ending dimension. 
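For illustration, a short sketch of how `select` and `narrow` behave on both tensors and lists, following the semantics documented above:

```python
import torch
from torch_geometric.utils import narrow, select

src = torch.arange(12).view(4, 3)
mask = torch.tensor([True, False, True, False])

select(src, mask, dim=0)                   # keeps rows 0 and 2 (boolean mask)
select(['a', 'b', 'c', 'd'], mask, dim=0)  # ['a', 'c']
narrow(src, dim=0, start=1, length=2)      # rows 1 and 2
```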
- """ - if isinstance(src, Tensor): - return src.narrow(dim, start, length) - - if isinstance(src, list): - if dim != 0: - raise ValueError("Cannot narrow along dimension other than 0") - return src[start:start + length] - - raise ValueError(f"Encountered invalid input type (got '{type(src)}')") diff --git a/pytorch_geometric-2.3.1/torch_geometric/utils/sparse.py b/pytorch_geometric-2.3.1/torch_geometric/utils/sparse.py deleted file mode 100644 index e2c6034..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/utils/sparse.py +++ /dev/null @@ -1,320 +0,0 @@ -from typing import Any, Optional, Tuple, Union - -import torch -from torch import Tensor - -from torch_geometric.typing import SparseTensor - - -def dense_to_sparse(adj: Tensor) -> Tuple[Tensor, Tensor]: - r"""Converts a dense adjacency matrix to a sparse adjacency matrix defined - by edge indices and edge attributes. - - Args: - adj (Tensor): The dense adjacency matrix of shape - :obj:`[num_nodes, num_nodes]` or - :obj:`[batch_size, num_nodes, num_nodes]`. - - :rtype: (:class:`LongTensor`, :class:`Tensor`) - - Examples: - - >>> # Forr a single adjacency matrix - >>> adj = torch.tensor([[3, 1], - ... [2, 0]]) - >>> dense_to_sparse(adj) - (tensor([[0, 0, 1], - [0, 1, 0]]), - tensor([3, 1, 2])) - - >>> # For two adjacency matrixes - >>> adj = torch.tensor([[[3, 1], - ... [2, 0]], - ... [[0, 1], - ... [0, 2]]]) - >>> dense_to_sparse(adj) - (tensor([[0, 0, 1, 2, 3], - [0, 1, 0, 3, 3]]), - tensor([3, 1, 2, 1, 2])) - """ - if adj.dim() < 2 or adj.dim() > 3: - raise ValueError(f"Dense adjacency matrix 'adj' must be 2- or " - f"3-dimensional (got {adj.dim()} dimensions)") - - edge_index = adj.nonzero().t() - - if edge_index.size(0) == 2: - edge_attr = adj[edge_index[0], edge_index[1]] - return edge_index, edge_attr - else: - edge_attr = adj[edge_index[0], edge_index[1], edge_index[2]] - row = edge_index[1] + adj.size(-2) * edge_index[0] - col = edge_index[2] + adj.size(-1) * edge_index[0] - return torch.stack([row, col], dim=0), edge_attr - - -def is_torch_sparse_tensor(src: Any) -> bool: - r"""Returns :obj:`True` if the input :obj:`src` is a - :class:`torch.sparse.Tensor` (in any sparse layout). - - Args: - src (Any): The input object to be checked. - """ - if isinstance(src, Tensor): - if src.layout == torch.sparse_coo: - return True - if src.layout == torch.sparse_csr: - return True - if src.layout == torch.sparse_csc: - return True - return False - - -def is_sparse(src: Any) -> bool: - r"""Returns :obj:`True` if the input :obj:`src` is of type - :class:`torch.sparse.Tensor` (in any sparse layout) or of type - :class:`torch_sparse.SparseTensor`. - - Args: - src (Any): The input object to be checked. - """ - return is_torch_sparse_tensor(src) or isinstance(src, SparseTensor) - - -def to_torch_coo_tensor( - edge_index: Tensor, - edge_attr: Optional[Tensor] = None, - size: Optional[Union[int, Tuple[int, int]]] = None, - is_coalesced: bool = False, -) -> Tensor: - r"""Converts a sparse adjacency matrix defined by edge indices and edge - attributes to a :class:`torch.sparse.Tensor` with layout - `torch.sparse_coo`. - See :meth:`~torch_geometric.utils.to_edge_index` for the reverse operation. - - Args: - edge_index (LongTensor): The edge indices. - edge_attr (Tensor, optional): The edge attributes. - (default: :obj:`None`) - size (int or (int, int), optional): The size of the sparse matrix. - If given as an integer, will create a quadratic sparse matrix. 
- If set to :obj:`None`, will infer a quadratic sparse matrix based - on :obj:`edge_index.max() + 1`. (default: :obj:`None`) - is_coalesced (bool): If set to :obj:`True`, will assume that - :obj:`edge_index` is already coalesced and thus avoids expensive - computation. (default: :obj:`False`) - - :rtype: :class:`torch.sparse.Tensor` - - Example: - - >>> edge_index = torch.tensor([[0, 1, 1, 2, 2, 3], - ... [1, 0, 2, 1, 3, 2]]) - >>> to_torch_coo_tensor(edge_index) - tensor(indices=tensor([[0, 1, 1, 2, 2, 3], - [1, 0, 2, 1, 3, 2]]), - values=tensor([1., 1., 1., 1., 1., 1.]), - size=(4, 4), nnz=6, layout=torch.sparse_coo) - - """ - if size is None: - size = int(edge_index.max()) + 1 - if not isinstance(size, (tuple, list)): - size = (size, size) - - if edge_attr is None: - edge_attr = torch.ones(edge_index.size(1), device=edge_index.device) - - size = tuple(size) + edge_attr.size()[1:] - - adj = torch.sparse_coo_tensor( - indices=edge_index, - values=edge_attr, - size=size, - device=edge_index.device, - ) - return adj._coalesced_(True) if is_coalesced else adj.coalesce() - - -def to_torch_csr_tensor( - edge_index: Tensor, - edge_attr: Optional[Tensor] = None, - size: Optional[Union[int, Tuple[int, int]]] = None, - is_coalesced: bool = False, -) -> Tensor: - r"""Converts a sparse adjacency matrix defined by edge indices and edge - attributes to a :class:`torch.sparse.Tensor` with layout - `torch.sparse_csr`. - See :meth:`~torch_geometric.utils.to_edge_index` for the reverse operation. - - Args: - edge_index (LongTensor): The edge indices. - edge_attr (Tensor, optional): The edge attributes. - (default: :obj:`None`) - size (int or (int, int), optional): The size of the sparse matrix. - If given as an integer, will create a quadratic sparse matrix. - If set to :obj:`None`, will infer a quadratic sparse matrix based - on :obj:`edge_index.max() + 1`. (default: :obj:`None`) - is_coalesced (bool): If set to :obj:`True`, will assume that - :obj:`edge_index` is already coalesced and thus avoids expensive - computation. (default: :obj:`False`) - - :rtype: :class:`torch.sparse.Tensor` - - Example: - - >>> edge_index = torch.tensor([[0, 1, 1, 2, 2, 3], - ... [1, 0, 2, 1, 3, 2]]) - >>> to_torch_csr_tensor(edge_index) - tensor(crow_indices=tensor([0, 1, 3, 5, 6]), - col_indices=tensor([1, 0, 2, 1, 3, 2]), - values=tensor([1., 1., 1., 1., 1., 1.]), - size=(4, 4), nnz=6, layout=torch.sparse_csr) - - """ - adj = to_torch_coo_tensor(edge_index, edge_attr, size, is_coalesced) - return adj.to_sparse_csr() - - -def to_torch_csc_tensor( - edge_index: Tensor, - edge_attr: Optional[Tensor] = None, - size: Optional[Union[int, Tuple[int, int]]] = None, - is_coalesced: bool = False, -) -> Tensor: - r"""Converts a sparse adjacency matrix defined by edge indices and edge - attributes to a :class:`torch.sparse.Tensor` with layout - `torch.sparse_csc`. - See :meth:`~torch_geometric.utils.to_edge_index` for the reverse operation. - - Args: - edge_index (LongTensor): The edge indices. - edge_attr (Tensor, optional): The edge attributes. - (default: :obj:`None`) - size (int or (int, int), optional): The size of the sparse matrix. - If given as an integer, will create a quadratic sparse matrix. - If set to :obj:`None`, will infer a quadratic sparse matrix based - on :obj:`edge_index.max() + 1`. (default: :obj:`None`) - is_coalesced (bool): If set to :obj:`True`, will assume that - :obj:`edge_index` is already coalesced and thus avoids expensive - computation. 
(default: :obj:`False`) - - :rtype: :class:`torch.sparse.Tensor` - - Example: - - >>> edge_index = torch.tensor([[0, 1, 1, 2, 2, 3], - ... [1, 0, 2, 1, 3, 2]]) - >>> to_torch_csc_tensor(edge_index) - tensor(ccol_indices=tensor([0, 1, 3, 5, 6]), - row_indices=tensor([1, 0, 2, 1, 3, 2]), - values=tensor([1., 1., 1., 1., 1., 1.]), - size=(4, 4), nnz=6, layout=torch.sparse_csc) - - """ - adj = to_torch_coo_tensor(edge_index, edge_attr, size, is_coalesced) - return adj.to_sparse_csc() - - -def to_edge_index(adj: Union[Tensor, SparseTensor]) -> Tuple[Tensor, Tensor]: - r"""Converts a :class:`torch.sparse.Tensor` or a - :class:`torch_sparse.SparseTensor` to edge indices and edge attributes. - - Args: - adj (torch.sparse.Tensor or SparseTensor): The adjacency matrix. - - :rtype: (:class:`torch.Tensor`, :class:`torch.Tensor`) - - Example: - - >>> edge_index = torch.tensor([[0, 1, 1, 2, 2, 3], - ... [1, 0, 2, 1, 3, 2]]) - >>> adj = to_torch_coo_tensor(edge_index) - >>> to_edge_index(adj) - (tensor([[0, 1, 1, 2, 2, 3], - [1, 0, 2, 1, 3, 2]]), - tensor([1., 1., 1., 1., 1., 1.])) - """ - if isinstance(adj, SparseTensor): - row, col, value = adj.coo() - if value is None: - value = torch.ones(row.size(0), device=row.device) - return torch.stack([row, col], dim=0).long(), value - - if adj.layout == torch.sparse_coo: - return adj.indices().detach().long(), adj.values() - - if adj.layout == torch.sparse_csr: - row = ptr2index(adj.crow_indices().detach()) - col = adj.col_indices().detach() - return torch.stack([row, col], dim=0).long(), adj.values() - - if adj.layout == torch.sparse_csc: - col = ptr2index(adj.ccol_indices().detach()) - row = adj.row_indices().detach() - return torch.stack([row, col], dim=0).long(), adj.values() - - raise ValueError(f"Unexpected sparse tensor layout (got '{adj.layout}')") - - -# Helper functions ############################################################ - - -def get_sparse_diag( - size: int, - fill_value: float = 1.0, - layout: Optional[int] = None, - dtype: Optional[torch.dtype] = None, - device: Optional[torch.device] = None, -) -> Tensor: - return torch.sparse.spdiags( - torch.full((1, size), fill_value, dtype=dtype, device=device), - offsets=torch.zeros(1, dtype=torch.long, device=device), - shape=(size, size), - layout=layout, - ) - - -def set_sparse_value(adj: Tensor, value: Tensor) -> Tensor: - size = adj.size() - - if value.dim() > 1: - size = size + value.size()[1:] - - if adj.layout == torch.sparse_coo: - return torch.sparse_coo_tensor( - indices=adj.indices(), - values=value, - size=size, - device=value.device, - ).coalesce() - - if adj.layout == torch.sparse_csr: - return torch.sparse_csr_tensor( - crow_indices=adj.crow_indices(), - col_indices=adj.col_indices(), - values=value, - size=size, - device=value.device, - ) - - if adj.layout == torch.sparse_csc: - return torch.sparse_csc_tensor( - ccol_indices=adj.ccol_indices(), - row_indices=adj.row_indices(), - values=value, - size=size, - device=value.device, - ) - - raise ValueError(f"Unexpected sparse tensor layout (got '{adj.layout}')") - - -def ptr2index(ptr: Tensor) -> Tensor: - ind = torch.arange(ptr.numel() - 1, dtype=ptr.dtype, device=ptr.device) - return ind.repeat_interleave(ptr[1:] - ptr[:-1]) - - -def index2ptr(index: Tensor, size: int) -> Tensor: - return torch._convert_indices_from_coo_to_csr( - index, size, out_int32=index.dtype == torch.int32) diff --git a/pytorch_geometric-2.3.1/torch_geometric/utils/subgraph.py b/pytorch_geometric-2.3.1/torch_geometric/utils/subgraph.py deleted file mode 100644 
index 490e53a..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/utils/subgraph.py +++ /dev/null @@ -1,326 +0,0 @@ -from typing import List, Optional, Tuple, Union - -import torch -from torch import Tensor - -from torch_geometric.typing import OptTensor, PairTensor -from torch_geometric.utils.mask import index_to_mask -from torch_geometric.utils.num_nodes import maybe_num_nodes - - -def get_num_hops(model: torch.nn.Module) -> int: - r"""Returns the number of hops the model is aggregating information - from. - - Example: - - >>> class GNN(torch.nn.Module): - ... def __init__(self): - ... super().__init__() - ... self.conv1 = GCNConv(3, 16) - ... self.conv2 = GCNConv(16, 16) - ... self.lin = Linear(16, 2) - ... - ... def forward(self, x, edge_index): - ... x = torch.relu(self.conv1(x, edge_index)) - ... x = self.conv2(x, edge_index) - ... return self.lin(x) - >>> get_num_hops(GNN()) - 2 - """ - from torch_geometric.nn.conv import MessagePassing - num_hops = 0 - for module in model.modules(): - if isinstance(module, MessagePassing): - num_hops += 1 - return num_hops - - -def subgraph( - subset: Union[Tensor, List[int]], - edge_index: Tensor, - edge_attr: OptTensor = None, - relabel_nodes: bool = False, - num_nodes: Optional[int] = None, - return_edge_mask: bool = False, -) -> Union[Tuple[Tensor, OptTensor], Tuple[Tensor, OptTensor, OptTensor]]: - r"""Returns the induced subgraph of :obj:`(edge_index, edge_attr)` - containing the nodes in :obj:`subset`. - - Args: - subset (LongTensor, BoolTensor or [int]): The nodes to keep. - edge_index (LongTensor): The edge indices. - edge_attr (Tensor, optional): Edge weights or multi-dimensional - edge features. (default: :obj:`None`) - relabel_nodes (bool, optional): If set to :obj:`True`, the resulting - :obj:`edge_index` will be relabeled to hold consecutive indices - starting from zero. (default: :obj:`False`) - num_nodes (int, optional): The number of nodes, *i.e.* - :obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`) - return_edge_mask (bool, optional): If set to :obj:`True`, will return - the edge mask to filter out additional edge features. - (default: :obj:`False`) - - :rtype: (:class:`LongTensor`, :class:`Tensor`) - - Examples: - - >>> edge_index = torch.tensor([[0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6], - ...
[1, 0, 2, 1, 3, 2, 4, 3, 5, 4, 6, 5]]) - >>> edge_attr = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]) - >>> subset = torch.tensor([3, 4, 5]) - >>> subgraph(subset, edge_index, edge_attr) - (tensor([[3, 4, 4, 5], - [4, 3, 5, 4]]), - tensor([ 7., 8., 9., 10.])) - - >>> subgraph(subset, edge_index, edge_attr, return_edge_mask=True) - (tensor([[3, 4, 4, 5], - [4, 3, 5, 4]]), - tensor([ 7., 8., 9., 10.]), - tensor([False, False, False, False, False, False, True, - True, True, True, False, False])) - """ - - device = edge_index.device - - if isinstance(subset, (list, tuple)): - subset = torch.tensor(subset, dtype=torch.long, device=device) - - if subset.dtype != torch.bool: - num_nodes = maybe_num_nodes(edge_index, num_nodes) - node_mask = index_to_mask(subset, size=num_nodes) - else: - num_nodes = subset.size(0) - node_mask = subset - - edge_mask = node_mask[edge_index[0]] & node_mask[edge_index[1]] - edge_index = edge_index[:, edge_mask] - edge_attr = edge_attr[edge_mask] if edge_attr is not None else None - - if relabel_nodes: - node_idx = torch.zeros(node_mask.size(0), dtype=torch.long, - device=device) - node_idx[subset] = torch.arange(node_mask.sum().item(), device=device) - edge_index = node_idx[edge_index] - - if return_edge_mask: - return edge_index, edge_attr, edge_mask - else: - return edge_index, edge_attr - - -def bipartite_subgraph( - subset: Union[PairTensor, Tuple[List[int], List[int]]], - edge_index: Tensor, - edge_attr: OptTensor = None, - relabel_nodes: bool = False, - size: Optional[Tuple[int, int]] = None, - return_edge_mask: bool = False, -) -> Union[Tuple[Tensor, OptTensor], Tuple[Tensor, OptTensor, OptTensor]]: - r"""Returns the induced subgraph of the bipartite graph - :obj:`(edge_index, edge_attr)` containing the nodes in :obj:`subset`. - - Args: - subset (Tuple[Tensor, Tensor] or tuple([int],[int])): The nodes - to keep. - edge_index (LongTensor): The edge indices. - edge_attr (Tensor, optional): Edge weights or multi-dimensional - edge features. (default: :obj:`None`) - relabel_nodes (bool, optional): If set to :obj:`True`, the resulting - :obj:`edge_index` will be relabeled to hold consecutive indices - starting from zero. (default: :obj:`False`) - size (tuple, optional): The number of nodes. - (default: :obj:`None`) - return_edge_mask (bool, optional): If set to :obj:`True`, will return - the edge mask to filter out additional edge features. - (default: :obj:`False`) - - :rtype: (:class:`LongTensor`, :class:`Tensor`) - - Examples: - - >>> edge_index = torch.tensor([[0, 5, 2, 3, 3, 4, 4, 3, 5, 5, 6], - ... [0, 0, 3, 2, 0, 0, 2, 1, 2, 3, 1]]) - >>> edge_attr = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) - >>> subset = (torch.tensor([2, 3, 5]), torch.tensor([2, 3])) - >>> bipartite_subgraph(subset, edge_index, edge_attr) - (tensor([[2, 3, 5, 5], - [3, 2, 2, 3]]), - tensor([ 3, 4, 9, 10])) - - >>> bipartite_subgraph(subset, edge_index, edge_attr, - ... 
return_edge_mask=True) - (tensor([[2, 3, 5, 5], - [3, 2, 2, 3]]), - tensor([ 3, 4, 9, 10]), - tensor([False, False, True, True, False, False, False, False, - True, True, False])) - """ - - device = edge_index.device - - src_subset, dst_subset = subset - if not isinstance(src_subset, Tensor): - src_subset = torch.tensor(src_subset, dtype=torch.long, device=device) - if not isinstance(dst_subset, Tensor): - dst_subset = torch.tensor(dst_subset, dtype=torch.long, device=device) - - if src_subset.dtype != torch.bool: - src_size = int(edge_index[0].max()) + 1 if size is None else size[0] - src_node_mask = index_to_mask(src_subset, size=src_size) - else: - src_size = src_subset.size(0) - src_node_mask = src_subset - - if dst_subset.dtype != torch.bool: - dst_size = int(edge_index[1].max()) + 1 if size is None else size[1] - dst_node_mask = index_to_mask(dst_subset, size=dst_size) - else: - dst_size = dst_subset.size(0) - dst_node_mask = dst_subset - - edge_mask = src_node_mask[edge_index[0]] & dst_node_mask[edge_index[1]] - edge_index = edge_index[:, edge_mask] - edge_attr = edge_attr[edge_mask] if edge_attr is not None else None - - if relabel_nodes: - node_idx_i = edge_index.new_zeros(src_node_mask.size(0)) - node_idx_j = edge_index.new_zeros(dst_node_mask.size(0)) - node_idx_i[src_subset] = torch.arange(int(src_node_mask.sum()), - device=node_idx_i.device) - node_idx_j[dst_subset] = torch.arange(int(dst_node_mask.sum()), - device=node_idx_j.device) - edge_index = torch.stack([ - node_idx_i[edge_index[0]], - node_idx_j[edge_index[1]], - ], dim=0) - - if return_edge_mask: - return edge_index, edge_attr, edge_mask - else: - return edge_index, edge_attr - - -def k_hop_subgraph( - node_idx: Union[int, List[int], Tensor], - num_hops: int, - edge_index: Tensor, - relabel_nodes: bool = False, - num_nodes: Optional[int] = None, - flow: str = 'source_to_target', - directed: bool = False, -) -> Tuple[Tensor, Tensor, Tensor, Tensor]: - r"""Computes the induced subgraph of :obj:`edge_index` around all nodes in - :attr:`node_idx` reachable within :math:`k` hops. - - The :attr:`flow` argument denotes the direction of edges for finding - :math:`k`-hop neighbors. If set to :obj:`"source_to_target"`, then the - method will find all neighbors that point to the initial set of seed nodes - in :attr:`node_idx.` - This mimics the natural flow of message passing in Graph Neural Networks. - - The method returns (1) the nodes involved in the subgraph, (2) the filtered - :obj:`edge_index` connectivity, (3) the mapping from node indices in - :obj:`node_idx` to their new location, and (4) the edge mask indicating - which edges were preserved. - - Args: - node_idx (int, list, tuple or :obj:`torch.Tensor`): The central seed - node(s). - num_hops (int): The number of hops :math:`k`. - edge_index (LongTensor): The edge indices. - relabel_nodes (bool, optional): If set to :obj:`True`, the resulting - :obj:`edge_index` will be relabeled to hold consecutive indices - starting from zero. (default: :obj:`False`) - num_nodes (int, optional): The number of nodes, *i.e.* - :obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`) - flow (str, optional): The flow direction of :math:`k`-hop aggregation - (:obj:`"source_to_target"` or :obj:`"target_to_source"`). - (default: :obj:`"source_to_target"`) - directed (bool, optional): If set to :obj:`False`, will include all - edges between all sampled nodes. 
(default: :obj:`False`) - - :rtype: (:class:`LongTensor`, :class:`LongTensor`, :class:`LongTensor`, - :class:`BoolTensor`) - - Examples: - - >>> edge_index = torch.tensor([[0, 1, 2, 3, 4, 5], - ... [2, 2, 4, 4, 6, 6]]) - - >>> # Center node 6, 2-hops - >>> subset, edge_index, mapping, edge_mask = k_hop_subgraph( - ... 6, 2, edge_index, relabel_nodes=True) - >>> subset - tensor([2, 3, 4, 5, 6]) - >>> edge_index - tensor([[0, 1, 2, 3], - [2, 2, 4, 4]]) - >>> mapping - tensor([4]) - >>> edge_mask - tensor([False, False, True, True, True, True]) - >>> subset[mapping] - tensor([6]) - - >>> edge_index = torch.tensor([[1, 2, 4, 5], - ... [0, 1, 5, 6]]) - >>> (subset, edge_index, - ... mapping, edge_mask) = k_hop_subgraph([0, 6], 2, - ... edge_index, - ... relabel_nodes=True) - >>> subset - tensor([0, 1, 2, 4, 5, 6]) - >>> edge_index - tensor([[1, 2, 3, 4], - [0, 1, 4, 5]]) - >>> mapping - tensor([0, 5]) - >>> edge_mask - tensor([True, True, True, True]) - >>> subset[mapping] - tensor([0, 6]) - """ - - num_nodes = maybe_num_nodes(edge_index, num_nodes) - - assert flow in ['source_to_target', 'target_to_source'] - if flow == 'target_to_source': - row, col = edge_index - else: - col, row = edge_index - - node_mask = row.new_empty(num_nodes, dtype=torch.bool) - edge_mask = row.new_empty(row.size(0), dtype=torch.bool) - - if isinstance(node_idx, (int, list, tuple)): - node_idx = torch.tensor([node_idx], device=row.device).flatten() - else: - node_idx = node_idx.to(row.device) - - subsets = [node_idx] - - for _ in range(num_hops): - node_mask.fill_(False) - node_mask[subsets[-1]] = True - torch.index_select(node_mask, 0, row, out=edge_mask) - subsets.append(col[edge_mask]) - - subset, inv = torch.cat(subsets).unique(return_inverse=True) - inv = inv[:node_idx.numel()] - - node_mask.fill_(False) - node_mask[subset] = True - - if not directed: - edge_mask = node_mask[row] & node_mask[col] - - edge_index = edge_index[:, edge_mask] - - if relabel_nodes: - node_idx = row.new_full((num_nodes, ), -1) - node_idx[subset] = torch.arange(subset.size(0), device=row.device) - edge_index = node_idx[edge_index] - - return subset, edge_index, inv, edge_mask diff --git a/pytorch_geometric-2.3.1/torch_geometric/utils/trim_to_layer.py b/pytorch_geometric-2.3.1/torch_geometric/utils/trim_to_layer.py deleted file mode 100644 index 058bb40..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/utils/trim_to_layer.py +++ /dev/null @@ -1,125 +0,0 @@ -from typing import Dict, List, Optional, Tuple, Union - -import torch -from torch import Tensor - -from torch_geometric.typing import ( - EdgeType, - MaybeHeteroEdgeTensor, - MaybeHeteroNodeTensor, - NodeType, -) - - -def trim_to_layer( - layer: int, - num_sampled_nodes_per_hop: Union[List[int], Dict[NodeType, List[int]]], - num_sampled_edges_per_hop: Union[List[int], Dict[EdgeType, List[int]]], - x: Union[MaybeHeteroNodeTensor], - edge_index: Union[MaybeHeteroEdgeTensor], - edge_attr: Optional[MaybeHeteroEdgeTensor] = None, -) -> Tuple[MaybeHeteroEdgeTensor, MaybeHeteroNodeTensor, - Optional[MaybeHeteroEdgeTensor]]: - r"""Trims the :obj:`edge_index` representation, node features :obj:`x` and - edge features :obj:`edge_attr` to a minimal-sized representation for the - current GNN layer :obj:`layer` in directed - :class:`~torch_geometric.loader.NeighborLoader` scenarios. - - This ensures that no computation is performed for nodes and edges that are - not included in the current GNN layer, thus avoiding unnecessary - computation within the GNN when performing neighborhood sampling.
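At its core, the trimming is a per-layer `narrow` over a prefix of the sampled tensors; a toy sketch of the homogeneous case (the hop sizes below are made up for illustration):

```python
import torch

# Say sampling yielded 2 seed nodes, 3 first-hop and 5 second-hop nodes:
num_sampled_nodes_per_hop = [2, 3, 5]
x = torch.randn(10, 16)  # features of all 10 sampled nodes

# Entering layer 1, the second-hop frontier is no longer needed:
layer = 1
keep = x.size(0) - num_sampled_nodes_per_hop[-layer]
x = x.narrow(dim=0, start=0, length=keep)
assert x.size(0) == 5
```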
- - Args: - layer (int): The current GNN layer. - num_sampled_nodes_per_hop (List[int] or Dict[NodeType, List[int]]): The - number of sampled nodes per hop. - num_sampled_edges_per_hop (List[int] or Dict[EdgeType, List[int]]): The - number of sampled edges per hop. - x (torch.Tensor or Dict[NodeType, torch.Tensor]): The homogeneous or - heterogeneous (hidden) node features. - edge_index (torch.Tensor or Dict[EdgeType, torch.Tensor]): The - homogeneous or heterogeneous edge indices. - edge_attr (torch.Tensor or Dict[EdgeType, torch.Tensor], optional): The - homogeneous or heterogeneous (hidden) edge features. - """ - if layer <= 0: - return x, edge_index, edge_attr - - if isinstance(num_sampled_edges_per_hop, dict): - x = { - k: v.narrow( - dim=0, - start=0, - length=v.size(0) - num_sampled_nodes_per_hop[k][-layer], - ) - for k, v in x.items() - } - edge_index = { - k: v.narrow( - dim=1, - start=0, - length=v.size(1) - num_sampled_edges_per_hop[k][-layer], - ) - for k, v in edge_index.items() - } - if edge_attr is not None: - edge_attr = { - k: v.narrow( - dim=0, - start=0, - length=v.size(0) - num_sampled_edges_per_hop[k][-layer], - ) - for k, v in edge_attr.items() - } - return x, edge_index, edge_attr - - x = x.narrow( - dim=0, - start=0, - length=x.size(0) - num_sampled_nodes_per_hop[-layer], - ) - edge_index = edge_index.narrow( - dim=1, - start=0, - length=edge_index.size(1) - num_sampled_edges_per_hop[-layer], - ) - if edge_attr is not None: - edge_attr = edge_attr.narrow( - dim=0, - start=0, - length=edge_attr.size(0) - num_sampled_edges_per_hop[-layer], - ) - return x, edge_index, edge_attr - - -class TrimToLayer(torch.nn.Module): - def forward( - self, - layer: int, - num_sampled_nodes_per_hop: Optional[List[int]], - num_sampled_edges_per_hop: Optional[List[int]], - x: Tensor, - edge_index: Tensor, - edge_attr: Optional[Tensor] = None, - ) -> Tuple[Tensor, Tensor, Optional[Tensor]]: - - if (not isinstance(num_sampled_nodes_per_hop, list) - and isinstance(num_sampled_edges_per_hop, list)): - raise ValueError("'num_sampled_nodes_per_hop' needs to be given") - if (not isinstance(num_sampled_edges_per_hop, list) - and isinstance(num_sampled_nodes_per_hop, list)): - raise ValueError("'num_sampled_edges_per_hop' needs to be given") - - if num_sampled_nodes_per_hop is None: - return x, edge_index, edge_attr - if num_sampled_edges_per_hop is None: - return x, edge_index, edge_attr - - return trim_to_layer( - layer, - num_sampled_nodes_per_hop, - num_sampled_edges_per_hop, - x, - edge_index, - edge_attr, - ) diff --git a/pytorch_geometric-2.3.1/torch_geometric/utils/unbatch.py b/pytorch_geometric-2.3.1/torch_geometric/utils/unbatch.py deleted file mode 100644 index 62efcbf..0000000 --- a/pytorch_geometric-2.3.1/torch_geometric/utils/unbatch.py +++ /dev/null @@ -1,62 +0,0 @@ -from typing import List - -import torch -from torch import Tensor - -from torch_geometric.utils import degree - - -def unbatch(src: Tensor, batch: Tensor, dim: int = 0) -> List[Tensor]: - r"""Splits :obj:`src` according to a :obj:`batch` vector along dimension - :obj:`dim`. - - Args: - src (Tensor): The source tensor. - batch (LongTensor): The batch vector - :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each - entry in :obj:`src` to a specific example. Must be ordered. - dim (int, optional): The dimension along which to split the :obj:`src` - tensor. 
diff --git a/pytorch_geometric-2.3.1/torch_geometric/utils/unbatch.py b/pytorch_geometric-2.3.1/torch_geometric/utils/unbatch.py
deleted file mode 100644
index 62efcbf..0000000
--- a/pytorch_geometric-2.3.1/torch_geometric/utils/unbatch.py
+++ /dev/null
@@ -1,62 +0,0 @@
-from typing import List
-
-import torch
-from torch import Tensor
-
-from torch_geometric.utils import degree
-
-
-def unbatch(src: Tensor, batch: Tensor, dim: int = 0) -> List[Tensor]:
-    r"""Splits :obj:`src` according to a :obj:`batch` vector along dimension
-    :obj:`dim`.
-
-    Args:
-        src (Tensor): The source tensor.
-        batch (LongTensor): The batch vector
-            :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each
-            entry in :obj:`src` to a specific example. Must be ordered.
-        dim (int, optional): The dimension along which to split the :obj:`src`
-            tensor. (default: :obj:`0`)
-
-    :rtype: :class:`List[Tensor]`
-
-    Example:
-
-        >>> src = torch.arange(7)
-        >>> batch = torch.tensor([0, 0, 0, 1, 1, 2, 2])
-        >>> unbatch(src, batch)
-        (tensor([0, 1, 2]), tensor([3, 4]), tensor([5, 6]))
-    """
-    sizes = degree(batch, dtype=torch.long).tolist()
-    return src.split(sizes, dim)
-
-
-def unbatch_edge_index(edge_index: Tensor, batch: Tensor) -> List[Tensor]:
-    r"""Splits the :obj:`edge_index` according to a :obj:`batch` vector.
-
-    Args:
-        edge_index (Tensor): The edge_index tensor. Must be ordered.
-        batch (LongTensor): The batch vector
-            :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each
-            node to a specific example. Must be ordered.
-
-    :rtype: :class:`List[Tensor]`
-
-    Example:
-
-        >>> edge_index = torch.tensor([[0, 1, 1, 2, 2, 3, 4, 5, 5, 6],
-        ...                            [1, 0, 2, 1, 3, 2, 5, 4, 6, 5]])
-        >>> batch = torch.tensor([0, 0, 0, 0, 1, 1, 1])
-        >>> unbatch_edge_index(edge_index, batch)
-        (tensor([[0, 1, 1, 2, 2, 3],
-                [1, 0, 2, 1, 3, 2]]),
-        tensor([[0, 1, 1, 2],
-                [1, 0, 2, 1]]))
-    """
-    deg = degree(batch, dtype=torch.int64)
-    ptr = torch.cat([deg.new_zeros(1), deg.cumsum(dim=0)[:-1]], dim=0)
-
-    edge_batch = batch[edge_index[0]]
-    edge_index = edge_index - ptr[edge_batch]
-    sizes = degree(edge_batch, dtype=torch.int64).cpu().tolist()
-    return edge_index.split(sizes, dim=1)
diff --git a/pytorch_geometric-2.4.0/.gitattributes b/pytorch_geometric-2.4.0/.gitattributes
new file mode 100644
index 0000000..d9bd16b
--- /dev/null
+++ b/pytorch_geometric-2.4.0/.gitattributes
@@ -0,0 +1 @@
+*.py text eol=lf
diff --git a/pytorch_geometric-2.4.0/.github/CODEOWNERS b/pytorch_geometric-2.4.0/.github/CODEOWNERS
new file mode 100644
index 0000000..ac674f0
--- /dev/null
+++ b/pytorch_geometric-2.4.0/.github/CODEOWNERS
@@ -0,0 +1,34 @@
+# About code owners
+# https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners
+#
+# Code owners receive review requests when parts of code they "own" change.
+
+*.py @rusty1s @wsad1
+
+/torch_geometric/nn/ @EdisonLeeeee
+
+/torch_geometric/explain/ @RexYing @RBendias @dufourc1
+
+/torch_geometric/data/ @mananshah99
+
+/torch_geometric/loader/ @mananshah99 @pyg-team/intel-team
+
+/torch_geometric/sampler/ @mananshah99 @pyg-team/intel-team
+
+/torch_geometric/transforms/ @wsad1
+
+/torch_geometric/utils/ @wsad1
+
+/torch_geometric/datasets/ @wsad1
+
+/torch_geometric/graphgym/ @JiaxuanYou
+
+/test/ @wsad1
+
+/docs/ @rusty1s
+
+/graphgym/ @JiaxuanYou
+
+/examples/ @wsad1
+
+/benchmark/ @pyg-team/intel-team
diff --git a/pytorch_geometric-2.3.1/.github/CONTRIBUTING.md b/pytorch_geometric-2.4.0/.github/CONTRIBUTING.md
similarity index 84%
rename from pytorch_geometric-2.3.1/.github/CONTRIBUTING.md
rename to pytorch_geometric-2.4.0/.github/CONTRIBUTING.md
index 30ff755..d108e0e 100644
--- a/pytorch_geometric-2.3.1/.github/CONTRIBUTING.md
+++ b/pytorch_geometric-2.4.0/.github/CONTRIBUTING.md
@@ -16,22 +16,24 @@ If your PR isn't merged anytime soon (*e.g.,* due to its large size, complexity

To develop PyG on your machine, here are some tips:

-1. Ensure that you are running on one of the two latest PyTorch releases (*e.g.*, `1.12.0`):
+1. Ensure that you are running on one of the two latest PyTorch releases (*e.g.*, `2.0.0`):

   ```python
   import torch
   print(torch.__version__)
   ```
-2. Follow the [installation instructions](https://github.com/pyg-team/pytorch_geometric#installation) to install `pyg-lib`, `torch-scatter`, `torch-sparse`, `torch-cluster` and `torch-spline-conv` (if you haven't already):
+2. *(Optional)* Follow the [installation instructions](https://github.com/pyg-team/pytorch_geometric#installation) to install `pyg-lib`, `torch-scatter`, `torch-sparse`, `torch-cluster` and `torch-spline-conv` (if you haven't already).
+   Note that this step is optional and only necessary if you develop a feature that uses one of these libraries.

   ```bash
   pip install pyg-lib torch-scatter torch-sparse torch-cluster torch-spline-conv -f https://data.pyg.org/whl/torch-${TORCH}+${CUDA}.html
   ```

-   where `${TORCH}` should be replaced by your PyTorch version (*e.g.*, `1.12.0`), and `${CUDA}` should be replaced by your CUDA version (*e.g.*, `cpu` or `cu116`).
+   where `${TORCH}` should be replaced by your PyTorch version (*e.g.*, `2.0.0`), and `${CUDA}` should be replaced by your CUDA version (*e.g.*, `cpu` or `cu118`).

-3. Uninstall all existing PyG installations:
+3. Uninstall all existing PyG installations.
+   It is advised to run this command repeatedly to confirm that installations across all locations are properly removed.

   ```bash
   pip uninstall torch-geometric
@@ -57,7 +59,8 @@ To develop PyG on your machine, here are some tips:
   pip install -e ".[dev,full]"
   ```

-   This mode will symlink the Python files from the current local source tree into the Python install. Hence, if you modify a Python file, you do not need to reinstall PyG again and again.
+   This mode will symlink the Python files from the current local source tree into the Python install.
+   Hence, if you modify a Python file, you do not need to re-install PyG again.

7. Ensure that you have a working PyG installation by running the entire test suite with
@@ -90,7 +93,8 @@ PyG uses [GitHub Actions](https://github.com/pyg-team/pytorch_geometric/actions)

Every time you send a Pull Request, your commit will be built and checked against the PyG guidelines:

-1. Ensure that your code is formatted correctly by testing against the styleguide of [`flake8`](https://github.com/PyCQA/flake8):
+1. Ensure that your code is formatted correctly by testing against the styleguide of [`flake8`](https://github.com/PyCQA/flake8).
+   We use the [`Flake8-pyproject`](https://pypi.org/project/Flake8-pyproject/) plugin for configuration:

   ```bash
   flake8 .
   ```
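Taken together, steps 1 and 2 of the contributing guide boil down to making the PyTorch build and the extension wheels agree on versions. A minimal sketch of that pre-flight check, using only plain PyTorch APIs (on CPU-only builds, `torch.version.cuda` is `None`):

```python
import torch

# Step 1: are we on one of the two latest PyTorch releases?
print(torch.__version__)

# Step 2: the `${TORCH}`/`${CUDA}` pair in the wheel URL must match these values:
print(torch.version.cuda)         # e.g. '11.8' -> use the cu118 wheel index
print(torch.cuda.is_available())  # sanity check that the runtime actually sees a GPU
```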
diff --git a/pytorch_geometric-2.3.1/.github/ISSUE_TEMPLATE/bug-report.yml b/pytorch_geometric-2.4.0/.github/ISSUE_TEMPLATE/bug-report.yml similarity index 100% rename from pytorch_geometric-2.3.1/.github/ISSUE_TEMPLATE/bug-report.yml rename to pytorch_geometric-2.4.0/.github/ISSUE_TEMPLATE/bug-report.yml diff --git a/pytorch_geometric-2.3.1/.github/ISSUE_TEMPLATE/config.yml b/pytorch_geometric-2.4.0/.github/ISSUE_TEMPLATE/config.yml similarity index 100% rename from pytorch_geometric-2.3.1/.github/ISSUE_TEMPLATE/config.yml rename to pytorch_geometric-2.4.0/.github/ISSUE_TEMPLATE/config.yml diff --git a/pytorch_geometric-2.3.1/.github/ISSUE_TEMPLATE/documentation.yml b/pytorch_geometric-2.4.0/.github/ISSUE_TEMPLATE/documentation.yml similarity index 100% rename from pytorch_geometric-2.3.1/.github/ISSUE_TEMPLATE/documentation.yml rename to pytorch_geometric-2.4.0/.github/ISSUE_TEMPLATE/documentation.yml diff --git a/pytorch_geometric-2.3.1/.github/ISSUE_TEMPLATE/feature-request.yml b/pytorch_geometric-2.4.0/.github/ISSUE_TEMPLATE/feature-request.yml similarity index 100% rename from pytorch_geometric-2.3.1/.github/ISSUE_TEMPLATE/feature-request.yml rename to pytorch_geometric-2.4.0/.github/ISSUE_TEMPLATE/feature-request.yml diff --git a/pytorch_geometric-2.3.1/.github/ISSUE_TEMPLATE/installation.yml b/pytorch_geometric-2.4.0/.github/ISSUE_TEMPLATE/installation.yml similarity index 100% rename from pytorch_geometric-2.3.1/.github/ISSUE_TEMPLATE/installation.yml rename to pytorch_geometric-2.4.0/.github/ISSUE_TEMPLATE/installation.yml diff --git a/pytorch_geometric-2.3.1/.github/ISSUE_TEMPLATE/refactor.yml b/pytorch_geometric-2.4.0/.github/ISSUE_TEMPLATE/refactor.yml similarity index 100% rename from pytorch_geometric-2.3.1/.github/ISSUE_TEMPLATE/refactor.yml rename to pytorch_geometric-2.4.0/.github/ISSUE_TEMPLATE/refactor.yml diff --git a/pytorch_geometric-2.4.0/.github/actions/setup/action.yml b/pytorch_geometric-2.4.0/.github/actions/setup/action.yml new file mode 100644 index 0000000..bed4899 --- /dev/null +++ b/pytorch_geometric-2.4.0/.github/actions/setup/action.yml @@ -0,0 +1,76 @@ +name: Setup + +inputs: + python-version: + required: false + default: '3.8' + torch-version: + required: false + default: '2.1.0' + cuda-version: + required: false + default: cpu + torchvision-version: + required: false + default: '0.16.0' + full_install: + required: false + default: true + +runs: + using: composite + + steps: + - name: Set up Python ${{ inputs.python-version }} + uses: actions/setup-python@v4.3.0 + with: + python-version: ${{ inputs.python-version }} + check-latest: true + cache: pip + cache-dependency-path: | + pyproject.toml + + - name: Install PyTorch ${{ inputs.torch-version }}+${{ inputs.cuda-version }} + if: ${{ inputs.torch-version != 'nightly' }} + run: | + pip install torch==${{ inputs.torch-version }} --extra-index-url https://download.pytorch.org/whl/${{ inputs.cuda-version }} + python -c "import torch; print('PyTorch:', torch.__version__)" + python -c "import torch; print('CUDA available:', torch.cuda.is_available())" + python -c "import torch; print('CUDA:', torch.version.cuda)" + shell: bash + + - name: Install PyTorch ${{ inputs.torch-version }}+${{ inputs.cuda-version }} + if: ${{ inputs.torch-version == 'nightly' }} + run: | + pip install --pre torch --extra-index-url https://download.pytorch.org/whl/nightly/${{ inputs.cuda-version }} + python -c "import torch; print('PyTorch:', torch.__version__)" + python -c "import torch; print('CUDA available:', 
torch.cuda.is_available())" + python -c "import torch; print('CUDA:', torch.version.cuda)" + shell: bash + + - name: Install pyg-lib # pyg-lib is currently only available on Linux. + if: ${{ inputs.torch-version != 'nightly' && runner.os == 'Linux' }} + run: | + pip uninstall -y pyg-lib + pip install --no-index pyg-lib -f https://data.pyg.org/whl/nightly/torch-${{ inputs.torch-version }}+${{ inputs.cuda-version }}.html + shell: bash + + - name: Install faiss-cpu + if: ${{ inputs.cuda-version == 'cpu' }} + run: | + pip install faiss-cpu + shell: bash + + - name: Install faiss-gpu + if: ${{ inputs.cuda-version != 'cpu' }} + run: | + pip install faiss-gpu + shell: bash + + - name: Install extension packages + if: ${{ inputs.full_install == 'true' && inputs.torch-version != 'nightly' }} + run: | + pip install torchvision==${{ inputs.torchvision-version }} --extra-index-url https://download.pytorch.org/whl/${{ inputs.cuda-version }} + pip install scipy + pip install --no-index --upgrade torch-scatter torch-sparse torch-cluster torch-spline-conv -f https://data.pyg.org/whl/torch-${{ inputs.torch-version }}+${{ inputs.cuda-version }}.html + shell: bash diff --git a/pytorch_geometric-2.3.1/.github/labeler.yml b/pytorch_geometric-2.4.0/.github/labeler.yml similarity index 100% rename from pytorch_geometric-2.3.1/.github/labeler.yml rename to pytorch_geometric-2.4.0/.github/labeler.yml diff --git a/pytorch_geometric-2.4.0/.github/workflows/building_pyg_conda.yml b/pytorch_geometric-2.4.0/.github/workflows/building_pyg_conda.yml new file mode 100644 index 0000000..f121aca --- /dev/null +++ b/pytorch_geometric-2.4.0/.github/workflows/building_pyg_conda.yml @@ -0,0 +1,89 @@ +name: Building PyG Conda + +on: [workflow_dispatch] # yamllint disable-line rule:truthy + +jobs: + + conda-build: + runs-on: ${{ matrix.os }} + + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, macos-latest, windows-latest] + python-version: ['3.8', '3.9', '3.10', '3.11'] + torch-version: [1.12.0, 1.13.0, 2.0.0, 2.1.0] + cuda-version: ['cpu', 'cu113', 'cu116', 'cu117', 'cu118', 'cu121'] + exclude: + - torch-version: 1.12.0 + python-version: '3.11' + - torch-version: 1.12.0 + cuda-version: 'cu117' + - torch-version: 1.12.0 + cuda-version: 'cu118' + - torch-version: 1.12.0 + cuda-version: 'cu121' + - torch-version: 1.13.0 + python-version: '3.11' + - torch-version: 1.13.0 + cuda-version: 'cu113' + - torch-version: 1.13.0 + cuda-version: 'cu118' + - torch-version: 1.13.0 + cuda-version: 'cu121' + - torch-version: 2.0.0 + cuda-version: 'cu113' + - torch-version: 2.0.0 + cuda-version: 'cu116' + - torch-version: 1.13.0 + cuda-version: 'cu121' + - torch-version: 2.1.0 + cuda-version: 'cu113' + - torch-version: 2.1.0 + cuda-version: 'cu116' + - torch-version: 2.1.0 + cuda-version: 'cu117' + - os: macos-latest + cuda-version: 'cu113' + - os: macos-latest + cuda-version: 'cu115' + - os: macos-latest + cuda-version: 'cu116' + - os: macos-latest + cuda-version: 'cu117' + - os: macos-latest + cuda-version: 'cu118' + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Conda for Python ${{ matrix.python-version }} + uses: conda-incubator/setup-miniconda@v2 + with: + python-version: ${{ matrix.python-version }} + + - name: Free Disk Space (Ubuntu) + if: ${{ runner.os == 'Linux' }} + uses: jlumbroso/free-disk-space@main + + - name: Install Conda packages + run: | + conda install conda-build conda-verify --yes + shell: + bash -l {0} + + - name: Build Conda package + run: | + ./conda/pyg/build_conda.sh 
+        shell:
+          bash -l {0}
+
+      - name: Publish Conda package
+        run: |
+          conda install anaconda-client --yes
+          anaconda upload --force --label main $HOME/conda-bld/*/*.tar.bz2
+        env:
+          ANACONDA_API_TOKEN: ${{ secrets.PYG_ANACONDA_TOKEN }}
+        shell:
+          bash -l {0}
diff --git a/pytorch_geometric-2.4.0/.github/workflows/building_rusty1s_conda.yml b/pytorch_geometric-2.4.0/.github/workflows/building_rusty1s_conda.yml
new file mode 100644
index 0000000..2627422
--- /dev/null
+++ b/pytorch_geometric-2.4.0/.github/workflows/building_rusty1s_conda.yml
@@ -0,0 +1,89 @@
+name: Building rusty1s Conda
+
+on: [workflow_dispatch]  # yamllint disable-line rule:truthy
+
+jobs:
+
+  conda-build:
+    runs-on: ${{ matrix.os }}
+
+    strategy:
+      fail-fast: false
+      matrix:
+        os: [ubuntu-latest, macos-latest, windows-latest]
+        python-version: ['3.8', '3.9', '3.10', '3.11']
+        torch-version: [1.12.0, 1.13.0, 2.0.0, 2.1.0]
+        cuda-version: ['cpu', 'cu113', 'cu116', 'cu117', 'cu118', 'cu121']
+        exclude:
+          - torch-version: 1.12.0
+            python-version: '3.11'
+          - torch-version: 1.12.0
+            cuda-version: 'cu117'
+          - torch-version: 1.12.0
+            cuda-version: 'cu118'
+          - torch-version: 1.12.0
+            cuda-version: 'cu121'
+          - torch-version: 1.13.0
+            python-version: '3.11'
+          - torch-version: 1.13.0
+            cuda-version: 'cu113'
+          - torch-version: 1.13.0
+            cuda-version: 'cu118'
+          - torch-version: 1.13.0
+            cuda-version: 'cu121'
+          - torch-version: 2.0.0
+            cuda-version: 'cu113'
+          - torch-version: 2.0.0
+            cuda-version: 'cu116'
+          - torch-version: 2.0.0
+            cuda-version: 'cu121'
+          - torch-version: 2.1.0
+            cuda-version: 'cu113'
+          - torch-version: 2.1.0
+            cuda-version: 'cu116'
+          - torch-version: 2.1.0
+            cuda-version: 'cu117'
+          - os: macos-latest
+            cuda-version: 'cu113'
+          - os: macos-latest
+            cuda-version: 'cu115'
+          - os: macos-latest
+            cuda-version: 'cu116'
+          - os: macos-latest
+            cuda-version: 'cu117'
+          - os: macos-latest
+            cuda-version: 'cu118'
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Set up Conda for Python ${{ matrix.python-version }}
+        uses: conda-incubator/setup-miniconda@v2
+        with:
+          python-version: ${{ matrix.python-version }}
+
+      - name: Free Disk Space (Ubuntu)
+        if: ${{ runner.os == 'Linux' }}
+        uses: jlumbroso/free-disk-space@main
+
+      - name: Install Conda packages
+        run: |
+          conda install conda-build conda-verify --yes
+        shell:
+          bash -l {0}
+
+      - name: Build Conda package
+        run: |
+          ./conda/pytorch-geometric/build_conda.sh ${{ matrix.python-version }} ${{ matrix.torch-version }} ${{ matrix.cuda-version }}
+        shell:
+          bash -l {0}
+
+      - name: Publish Conda package
+        run: |
+          conda install anaconda-client --yes
+          anaconda upload --force --label main $HOME/conda-bld/*/*.tar.bz2
+        env:
+          ANACONDA_API_TOKEN: ${{ secrets.RUSTY1S_ANACONDA_TOKEN }}
+        shell:
+          bash -l {0}
diff --git a/pytorch_geometric-2.3.1/.github/workflows/changelog.yml b/pytorch_geometric-2.4.0/.github/workflows/changelog.yml
similarity index 100%
rename from pytorch_geometric-2.3.1/.github/workflows/changelog.yml
rename to pytorch_geometric-2.4.0/.github/workflows/changelog.yml
diff --git a/pytorch_geometric-2.3.1/.github/workflows/documentation.yml b/pytorch_geometric-2.4.0/.github/workflows/documentation.yml
similarity index 93%
rename from pytorch_geometric-2.3.1/.github/workflows/documentation.yml
rename to pytorch_geometric-2.4.0/.github/workflows/documentation.yml
index 4d69fbd..b61f33a 100644
--- a/pytorch_geometric-2.3.1/.github/workflows/documentation.yml
+++ b/pytorch_geometric-2.4.0/.github/workflows/documentation.yml
@@ -13,7 +13,7 @@ jobs:

   steps:
     - name: Checkout repository
-      uses: actions/checkout@v3
+      uses: actions/checkout@v4
       with:
         fetch-depth: 40

@@ -28,6 +28,7 @@ jobs:
           docker/**
           examples/**
           graphgym/**
+          README.md
           CHANGELOG.md

     - name: Setup packages
@@ -39,6 +40,7 @@ jobs:
     - name: Install main package
       if: steps.changed-files-specific.outputs.only_changed != 'true'
       run: |
+        pip install nbsphinx
        pip install git+https://github.com/pyg-team/pyg_sphinx_theme.git
        pip install -e .

diff --git a/pytorch_geometric-2.3.1/.github/workflows/examples.yml b/pytorch_geometric-2.4.0/.github/workflows/examples.yml
similarity index 96%
rename from pytorch_geometric-2.3.1/.github/workflows/examples.yml
rename to pytorch_geometric-2.4.0/.github/workflows/examples.yml
index c438dbc..3447a66 100644
--- a/pytorch_geometric-2.3.1/.github/workflows/examples.yml
+++ b/pytorch_geometric-2.4.0/.github/workflows/examples.yml
@@ -13,7 +13,7 @@ jobs:

   steps:
     - name: Checkout repository
-      uses: actions/checkout@v3
+      uses: actions/checkout@v4

     - name: Setup packages
       uses: ./.github/actions/setup
diff --git a/pytorch_geometric-2.4.0/.github/workflows/full_gpu_testing.yml b/pytorch_geometric-2.4.0/.github/workflows/full_gpu_testing.yml
new file mode 100644
index 0000000..83996c8
--- /dev/null
+++ b/pytorch_geometric-2.4.0/.github/workflows/full_gpu_testing.yml
@@ -0,0 +1,34 @@
+name: Full GPU Testing
+
+on:  # yamllint disable-line rule:truthy
+  workflow_dispatch:
+  schedule:
+    - cron: "0 6 * * *"  # Every day at 6:00am UTC/10:00pm PST
+
+jobs:
+
+  full_gpu_pytest:
+    if: github.repository == 'pyg-team/pytorch_geometric'
+    runs-on: [self-hosted, nvidia]
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Setup packages
+        uses: ./.github/actions/setup
+        with:
+          cuda-version: 'cu118'
+
+      - name: Print GPU information
+        run: |
+          nvidia-smi
+
+      - name: Install main package
+        run: |
+          pip install -e .[full,test]
+
+      - name: Run tests
+        run: |
+          FULL_TEST=1 pytest
+        shell: bash
diff --git a/pytorch_geometric-2.4.0/.github/workflows/full_testing.yml b/pytorch_geometric-2.4.0/.github/workflows/full_testing.yml
new file mode 100644
index 0000000..a9dde19
--- /dev/null
+++ b/pytorch_geometric-2.4.0/.github/workflows/full_testing.yml
@@ -0,0 +1,60 @@
+name: Full Testing
+
+on:  # yamllint disable-line rule:truthy
+  workflow_dispatch:
+  schedule:
+    - cron: "0 6 * * *"  # Every day at 6:00am UTC/10:00pm PST
+
+jobs:
+
+  full_pytest:
+    if: github.repository == 'pyg-team/pytorch_geometric'
+    runs-on: ${{ matrix.os }}
+
+    strategy:
+      fail-fast: false
+      matrix:
+        os: [ubuntu-latest, windows-latest]
+        python-version: ['3.8', '3.10']
+        torch-version: [1.12.0, 1.13.0, 2.0.0, 2.1.0, nightly]
+        include:
+          - torch-version: 1.12.0
+            torchvision-version: 0.13.0
+          - torch-version: 1.13.0
+            torchvision-version: 0.14.0
+          - torch-version: 2.0.0
+            torchvision-version: 0.15.0
+          - torch-version: 2.1.0
+            torchvision-version: 0.16.0
+          - torch-version: nightly
+            torchvision-version: nightly
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Setup packages
+        uses: ./.github/actions/setup
+        with:
+          python-version: ${{ matrix.python-version }}
+          torch-version: ${{ matrix.torch-version }}
+          torchvision-version: ${{ matrix.torchvision-version }}
+
+      - name: Install graphviz
+        if: ${{ runner.os == 'Linux' }}
+        run: |
+          sudo apt-get install graphviz
+
+      - name: Install main package
+        run: |
+          pip install -e .[full,test]
+
+      - name: Run tests
+        run: |
FULL_TEST=1 pytest --cov --cov-report=xml + shell: bash + + - name: Upload coverage + uses: codecov/codecov-action@v2 + with: + fail_ci_if_error: false diff --git a/pytorch_geometric-2.3.1/.github/workflows/labeler.yml b/pytorch_geometric-2.4.0/.github/workflows/labeler.yml similarity index 100% rename from pytorch_geometric-2.3.1/.github/workflows/labeler.yml rename to pytorch_geometric-2.4.0/.github/workflows/labeler.yml diff --git a/pytorch_geometric-2.4.0/.github/workflows/latest_testing.yml b/pytorch_geometric-2.4.0/.github/workflows/latest_testing.yml new file mode 100644 index 0000000..4a5783f --- /dev/null +++ b/pytorch_geometric-2.4.0/.github/workflows/latest_testing.yml @@ -0,0 +1,49 @@ +name: Testing PyTorch nightly + +on: # yamllint disable-line rule:truthy + push: + branches: + - master + pull_request: + +jobs: + + latest_pytest: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 40 + + # Skip workflow if only certain files have been changed. + - name: Get changed files + id: changed-files-specific + uses: tj-actions/changed-files@v34 + with: + files: | + benchmark/** + conda/** + docker/** + docs/** + examples/** + graphgym/** + README.md + CHANGELOG.md + + - name: Setup packages + if: steps.changed-files-specific.outputs.only_changed != 'true' + uses: ./.github/actions/setup + with: + torch-version: nightly + + - name: Install main package + if: steps.changed-files-specific.outputs.only_changed != 'true' + run: | + pip install -e .[full,test] + + - name: Run tests + if: steps.changed-files-specific.outputs.only_changed != 'true' + run: | + pytest diff --git a/pytorch_geometric-2.3.1/.github/workflows/linting.yml b/pytorch_geometric-2.4.0/.github/workflows/linting.yml similarity index 80% rename from pytorch_geometric-2.3.1/.github/workflows/linting.yml rename to pytorch_geometric-2.4.0/.github/workflows/linting.yml index 37404d8..77da597 100644 --- a/pytorch_geometric-2.3.1/.github/workflows/linting.yml +++ b/pytorch_geometric-2.4.0/.github/workflows/linting.yml @@ -13,7 +13,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup packages uses: ./.github/actions/setup @@ -34,16 +34,12 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up Python uses: actions/setup-python@v4.3.0 with: python-version: 3.8 - check-latest: true - cache: pip - cache-dependency-path: | - setup.py - name: Install dependencies run: pip install pylint @@ -57,16 +53,12 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up Python uses: actions/setup-python@v4.3.0 with: python-version: 3.8 - check-latest: true - cache: pip - cache-dependency-path: | - setup.py - name: Install dependencies run: pip install mypy diff --git a/pytorch_geometric-2.4.0/.github/workflows/minimal_testing.yml b/pytorch_geometric-2.4.0/.github/workflows/minimal_testing.yml new file mode 100644 index 0000000..ee6c57d --- /dev/null +++ b/pytorch_geometric-2.4.0/.github/workflows/minimal_testing.yml @@ -0,0 +1,49 @@ +name: Testing minimal PyTorch 2.1 + +on: # yamllint disable-line rule:truthy + push: + branches: + - master + pull_request: + +jobs: + + minimal_pytest: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 40 + + # Skip workflow if only certain files have been changed. 
+      - name: Get changed files
+        id: changed-files-specific
+        uses: tj-actions/changed-files@v34
+        with:
+          files: |
+            benchmark/**
+            conda/**
+            docker/**
+            docs/**
+            examples/**
+            graphgym/**
+            README.md
+            CHANGELOG.md
+
+      - name: Setup packages
+        if: steps.changed-files-specific.outputs.only_changed != 'true'
+        uses: ./.github/actions/setup
+        with:
+          full_install: false
+
+      - name: Install main package
+        if: steps.changed-files-specific.outputs.only_changed != 'true'
+        run: |
+          pip install -e .[test]
+
+      - name: Run tests
+        if: steps.changed-files-specific.outputs.only_changed != 'true'
+        run: |
+          pytest
diff --git a/pytorch_geometric-2.4.0/.github/workflows/nightly.yml b/pytorch_geometric-2.4.0/.github/workflows/nightly.yml
new file mode 100644
index 0000000..d214544
--- /dev/null
+++ b/pytorch_geometric-2.4.0/.github/workflows/nightly.yml
@@ -0,0 +1,44 @@
+name: Nightly Build
+
+on:  # yamllint disable-line rule:truthy
+  workflow_dispatch:
+  schedule:
+    - cron: "0 6 * * *"  # Every day at 6:00am UTC/10:00pm PST
+
+jobs:
+
+  build:
+    if: github.repository == 'pyg-team/pytorch_geometric'
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Set up Python
+        uses: actions/setup-python@v4.3.0
+        with:
+          python-version: 3.8
+
+      - name: Set version
+        run: echo "VERSION=$(sed -n "s/^__version__ = '\(.*\)'/\1/p" torch_geometric/__init__.py)" >> ${GITHUB_ENV}
+
+      - name: Set time
+        run: echo "TODAY=$(date +'%Y%m%d')" >> ${GITHUB_ENV}
+
+      - name: Customize build version
+        run: |
+          sed -i "s/$VERSION/$VERSION.dev$TODAY/" torch_geometric/__init__.py
+          sed -i '0,/name="torch_geometric"/s//name="pyg-nightly"/' pyproject.toml  # Only change first occurrence
+          sed -i "s/version=\"$VERSION\"/version=\"$VERSION.dev$TODAY\"/" pyproject.toml
+
+      - name: Build package
+        run: |
+          pip install --upgrade build
+          python -m build
+
+      - name: Publish package
+        uses: pypa/gh-action-pypi-publish@release/v1
+        with:
+          user: __token__
+          password: ${{ secrets.PYPI_API_TOKEN }}
diff --git a/pytorch_geometric-2.4.0/.github/workflows/prev_testing.yml b/pytorch_geometric-2.4.0/.github/workflows/prev_testing.yml
new file mode 100644
index 0000000..d4c2472
--- /dev/null
+++ b/pytorch_geometric-2.4.0/.github/workflows/prev_testing.yml
@@ -0,0 +1,64 @@
+name: Testing previous PyTorch versions
+
+on:  # yamllint disable-line rule:truthy
+  push:
+    branches:
+      - master
+  pull_request:
+
+jobs:
+
+  prev_pytest:
+    runs-on: ubuntu-latest
+
+    strategy:
+      fail-fast: false
+      matrix:
+        torch-version: [1.11.0, 1.12.0, 1.13.0, 2.0.0]
+        include:
+          - torch-version: 1.11.0
+            torchvision-version: 0.12.0
+          - torch-version: 1.12.0
+            torchvision-version: 0.13.0
+          - torch-version: 1.13.0
+            torchvision-version: 0.14.0
+          - torch-version: 2.0.0
+            torchvision-version: 0.15.0
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 40
+
+      # Skip workflow if only certain files have been changed.
+ - name: Get changed files + id: changed-files-specific + uses: tj-actions/changed-files@v34 + with: + files: | + benchmark/** + conda/** + docker/** + docs/** + examples/** + graphgym/** + README.md + CHANGELOG.md + + - name: Setup packages + if: steps.changed-files-specific.outputs.only_changed != 'true' + uses: ./.github/actions/setup + with: + torch-version: ${{ matrix.torch-version }} + torchvision-version: ${{ matrix.torchvision-version }} + + - name: Install main package + if: steps.changed-files-specific.outputs.only_changed != 'true' + run: | + pip install -e .[full,test] + + - name: Run tests + if: steps.changed-files-specific.outputs.only_changed != 'true' + run: | + pytest diff --git a/pytorch_geometric-2.4.0/.github/workflows/testing.yml b/pytorch_geometric-2.4.0/.github/workflows/testing.yml new file mode 100644 index 0000000..0710eb1 --- /dev/null +++ b/pytorch_geometric-2.4.0/.github/workflows/testing.yml @@ -0,0 +1,53 @@ +name: Testing PyTorch 2.1 + +on: # yamllint disable-line rule:truthy + push: + branches: + - master + pull_request: + +jobs: + + pytest: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 40 + + # Skip workflow if only certain files have been changed. + - name: Get changed files + id: changed-files-specific + uses: tj-actions/changed-files@v34 + with: + files: | + benchmark/** + conda/** + docker/** + docs/** + examples/** + graphgym/** + README.md + CHANGELOG.md + + - name: Setup packages + if: steps.changed-files-specific.outputs.only_changed != 'true' + uses: ./.github/actions/setup + + - name: Install main package + if: steps.changed-files-specific.outputs.only_changed != 'true' + run: | + pip install -e .[full,test] + + - name: Run tests + if: steps.changed-files-specific.outputs.only_changed != 'true' + run: | + pytest --cov --cov-report=xml + + - name: Upload coverage + if: steps.changed-files-specific.outputs.only_changed != 'true' + uses: codecov/codecov-action@v2 + with: + fail_ci_if_error: false diff --git a/pytorch_geometric-2.3.1/.gitignore b/pytorch_geometric-2.4.0/.gitignore similarity index 100% rename from pytorch_geometric-2.3.1/.gitignore rename to pytorch_geometric-2.4.0/.gitignore diff --git a/pytorch_geometric-2.3.1/.pre-commit-config.yaml b/pytorch_geometric-2.4.0/.pre-commit-config.yaml similarity index 75% rename from pytorch_geometric-2.3.1/.pre-commit-config.yaml rename to pytorch_geometric-2.4.0/.pre-commit-config.yaml index 554d33c..3283a3f 100644 --- a/pytorch_geometric-2.3.1/.pre-commit-config.yaml +++ b/pytorch_geometric-2.4.0/.pre-commit-config.yaml @@ -4,6 +4,12 @@ ci: autoupdate_schedule: quarterly repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: no-commit-to-branch + name: No commits to master + - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.4.0 hooks: @@ -22,21 +28,21 @@ repos: )$ - repo: https://github.com/adrienverge/yamllint.git - rev: v1.29.0 + rev: v1.32.0 hooks: - id: yamllint name: Lint yaml args: [-d, '{extends: default, rules: {line-length: disable, document-start: disable, truthy: {level: error}, braces: {max-spaces-inside: 1}}}'] - - repo: https://github.com/regebro/pyroma - rev: "4.1" - hooks: - - id: pyroma - name: Check packaging - args: [--min=10, .] + # - repo: https://github.com/regebro/pyroma + # rev: "4.2" + # hooks: + # - id: pyroma + # name: Check packaging + # args: [--min=10, .] 
  - repo: https://github.com/google/yapf
-    rev: v0.32.0
+    rev: v0.40.2
    hooks:
      - id: yapf
        name: Format code
@@ -49,7 +55,8 @@ repos:
        name: Sort imports

  - repo: https://github.com/PyCQA/flake8
-    rev: 6.0.0
+    rev: 6.1.0
    hooks:
      - id: flake8
        name: Check PEP8
+        additional_dependencies: [Flake8-pyproject]
diff --git a/pytorch_geometric-2.4.0/CHANGELOG.md b/pytorch_geometric-2.4.0/CHANGELOG.md
new file mode 100644
index 0000000..81b7608
--- /dev/null
+++ b/pytorch_geometric-2.4.0/CHANGELOG.md
@@ -0,0 +1,635 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
+
+## [2.5.0] - 2023-MM-DD
+
+### Added
+
+### Changed
+
+### Removed
+
+## [2.4.0] - 2023-10-12
+
+### Added
+
+- Added the `ogc` method as an example ([#8168](https://github.com/pyg-team/pytorch_geometric/pull/8168))
+- Added a tutorial on `NeighborLoader` ([#7931](https://github.com/pyg-team/pytorch_geometric/pull/7931))
+- Added the option to override usage of `segment_matmul`/`grouped_matmul` via the `torch_geometric.backend.use_segment_matmul` flag ([#8148](https://github.com/pyg-team/pytorch_geometric/pull/8148))
+- Added support for PyTorch 2.1.0 ([#8134](https://github.com/pyg-team/pytorch_geometric/pull/8134))
+- Added the `NeuroGraphDataset` benchmark collection ([#8122](https://github.com/pyg-team/pytorch_geometric/pull/8122))
+- Added support for a node-level `mask` tensor in `dense_to_sparse` ([#8117](https://github.com/pyg-team/pytorch_geometric/pull/8117))
+- Added the `to_on_disk_dataset()` method to convert `InMemoryDataset` instances to `OnDiskDataset` instances ([#8116](https://github.com/pyg-team/pytorch_geometric/pull/8116))
+- Added `torch-frame` support ([#8110](https://github.com/pyg-team/pytorch_geometric/pull/8110), [#8118](https://github.com/pyg-team/pytorch_geometric/pull/8118), [#8151](https://github.com/pyg-team/pytorch_geometric/pull/8151), [#8152](https://github.com/pyg-team/pytorch_geometric/pull/8152))
+- Added the `DistLoader` base class ([#8079](https://github.com/pyg-team/pytorch_geometric/pull/8079))
+- Added `HyperGraphData` to support hypergraphs ([#7611](https://github.com/pyg-team/pytorch_geometric/pull/7611))
+- Added the `PCQM4Mv2` dataset as a reference implementation for `OnDiskDataset` ([#8102](https://github.com/pyg-team/pytorch_geometric/pull/8102))
+- Added `module_headers` property to `nn.Sequential` models ([#8093](https://github.com/pyg-team/pytorch_geometric/pull/8093))
+- Added `OnDiskDataset` interface with data loader support ([#8066](https://github.com/pyg-team/pytorch_geometric/pull/8066), [#8088](https://github.com/pyg-team/pytorch_geometric/pull/8088), [#8092](https://github.com/pyg-team/pytorch_geometric/pull/8092), [#8106](https://github.com/pyg-team/pytorch_geometric/pull/8106))
+- Added a tutorial for `Node2Vec` and `MetaPath2Vec` usage ([#7938](https://github.com/pyg-team/pytorch_geometric/pull/7938))
+- Added a tutorial for multi-GPU training with pure PyTorch ([#7894](https://github.com/pyg-team/pytorch_geometric/pull/7894))
+- Added `edge_attr` support to `ResGatedGraphConv` ([#8048](https://github.com/pyg-team/pytorch_geometric/pull/8048))
+- Added a `Database` interface and `SQLiteDatabase`/`RocksDatabase` implementations ([#8028](https://github.com/pyg-team/pytorch_geometric/pull/8028), [#8044](https://github.com/pyg-team/pytorch_geometric/pull/8044), [#8046](https://github.com/pyg-team/pytorch_geometric/pull/8046), [#8051](https://github.com/pyg-team/pytorch_geometric/pull/8051), [#8052](https://github.com/pyg-team/pytorch_geometric/pull/8052), [#8054](https://github.com/pyg-team/pytorch_geometric/pull/8054), [#8057](https://github.com/pyg-team/pytorch_geometric/pull/8057), [#8058](https://github.com/pyg-team/pytorch_geometric/pull/8058))
+- Added support for weighted/biased sampling in `NeighborLoader`/`LinkNeighborLoader` ([#8038](https://github.com/pyg-team/pytorch_geometric/pull/8038))
+- Added the `MixHopConv` layer and a corresponding example ([#8025](https://github.com/pyg-team/pytorch_geometric/pull/8025))
+- Added the option to pass keyword arguments to the underlying normalization layers within `BasicGNN` and `MLP` ([#8024](https://github.com/pyg-team/pytorch_geometric/pull/8024), [#8033](https://github.com/pyg-team/pytorch_geometric/pull/8033))
+- Added `IBMBNodeLoader` and `IBMBBatchLoader` data loaders ([#6230](https://github.com/pyg-team/pytorch_geometric/pull/6230))
+- Added the `NeuralFingerprint` model for learning fingerprints of molecules ([#7919](https://github.com/pyg-team/pytorch_geometric/pull/7919))
+- Added `SparseTensor` support to `WLConvContinuous`, `GeneralConv`, `PDNConv` and `ARMAConv` ([#8013](https://github.com/pyg-team/pytorch_geometric/pull/8013))
+- Added `LCMAggregation`, an implementation of Learnable Commutative Monoids, along with an example ([#7976](https://github.com/pyg-team/pytorch_geometric/pull/7976), [#8020](https://github.com/pyg-team/pytorch_geometric/pull/8020), [#8023](https://github.com/pyg-team/pytorch_geometric/pull/8023), [#8026](https://github.com/pyg-team/pytorch_geometric/pull/8026), [#8075](https://github.com/pyg-team/pytorch_geometric/pull/8075))
+- Added a warning for isolated/non-existing node types in `HeteroData.validate()` ([#7995](https://github.com/pyg-team/pytorch_geometric/pull/7995))
+- Added `utils.cumsum` implementation ([#7994](https://github.com/pyg-team/pytorch_geometric/pull/7994))
+- Added the `BrcaTcga` dataset ([#7905](https://github.com/pyg-team/pytorch_geometric/pull/7905))
+- Added the `MyketDataset` ([#7959](https://github.com/pyg-team/pytorch_geometric/pull/7959))
+- Added a multi-GPU `ogbn-papers100M` example ([#7921](https://github.com/pyg-team/pytorch_geometric/pull/7921))
+- Added `group_argsort` implementation ([#7948](https://github.com/pyg-team/pytorch_geometric/pull/7948))
+- Added `CachedLoader` implementation ([#7896](https://github.com/pyg-team/pytorch_geometric/pull/7896), [#7897](https://github.com/pyg-team/pytorch_geometric/pull/7897))
+- Added possibility to run training benchmarks on XPU device ([#7925](https://github.com/pyg-team/pytorch_geometric/pull/7925))
+- Added `utils.ppr` for personalized PageRank computation ([#7917](https://github.com/pyg-team/pytorch_geometric/pull/7917))
+- Added support for XPU device in `PrefetchLoader` ([#7918](https://github.com/pyg-team/pytorch_geometric/pull/7918))
+- Added support for floating-point slicing in `Dataset`, *e.g.*, `dataset[:0.9]` ([#7915](https://github.com/pyg-team/pytorch_geometric/pull/7915))
+- Added nightly GPU tests ([#7895](https://github.com/pyg-team/pytorch_geometric/pull/7895))
+- Added the `HalfHop` graph upsampling augmentation ([#7827](https://github.com/pyg-team/pytorch_geometric/pull/7827))
+- Added the `Wikidata5M` dataset ([#7864](https://github.com/pyg-team/pytorch_geometric/pull/7864))
+- Added TorchScript support inside `BasicGNN` models ([#7865](https://github.com/pyg-team/pytorch_geometric/pull/7865))
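One of the helpers listed above is easy to confuse with its PyTorch namesake. As a small sketch (assuming the behavior of the bundled 2.4.0 sources), `torch_geometric.utils.cumsum` differs from `torch.cumsum` by prepending a zero, which makes it convenient for building `ptr`-style offset vectors:

```python
import torch
from torch_geometric.utils import cumsum

deg = torch.tensor([2, 4, 1])    # e.g., number of nodes per graph in a batch
print(torch.cumsum(deg, dim=0))  # tensor([2, 6, 7])
print(cumsum(deg))               # tensor([0, 2, 6, 7]): one longer, with a leading zero
```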
+- Added a `batch_size` argument to `unbatch` functionalities ([#7851](https://github.com/pyg-team/pytorch_geometric/pull/7851))
+- Added a distributed example using `graphlearn-for-pytorch` ([#7402](https://github.com/pyg-team/pytorch_geometric/pull/7402))
+- Integrate `neg_sampling_ratio` into `TemporalDataLoader` ([#7644](https://github.com/pyg-team/pytorch_geometric/pull/7644))
+- Added `faiss`-based `KNNIndex` classes for L2 or maximum inner product search ([#7842](https://github.com/pyg-team/pytorch_geometric/pull/7842))
+- Added the `OSE_GVCS` dataset ([#7811](https://github.com/pyg-team/pytorch_geometric/pull/7811))
+- Added `output_initializer` argument to `DimeNet` models ([#7774](https://github.com/pyg-team/pytorch_geometric/pull/7774), [#7780](https://github.com/pyg-team/pytorch_geometric/pull/7780))
+- Added `lexsort` implementation ([#7775](https://github.com/pyg-team/pytorch_geometric/pull/7775))
+- Added possibility to run inference benchmarks on XPU device ([#7705](https://github.com/pyg-team/pytorch_geometric/pull/7705))
+- Added `HeteroData` support in `to_networkx` ([#7713](https://github.com/pyg-team/pytorch_geometric/pull/7713))
+- Added `FlopsCount` support via `fvcore` ([#7693](https://github.com/pyg-team/pytorch_geometric/pull/7693))
+- Added back support for PyTorch >= 1.11.0 ([#7656](https://github.com/pyg-team/pytorch_geometric/pull/7656))
+- Added `Data.sort()` and `HeteroData.sort()` functionalities ([#7649](https://github.com/pyg-team/pytorch_geometric/pull/7649))
+- Added `torch.nested_tensor` support in `Data` and `Batch` ([#7643](https://github.com/pyg-team/pytorch_geometric/pull/7643), [#7647](https://github.com/pyg-team/pytorch_geometric/pull/7647))
+- Added `interval` argument to `Cartesian`, `LocalCartesian` and `Distance` transformations ([#7533](https://github.com/pyg-team/pytorch_geometric/pull/7533), [#7614](https://github.com/pyg-team/pytorch_geometric/pull/7614), [#7700](https://github.com/pyg-team/pytorch_geometric/pull/7700))
+- Added a `LightGCN` example on the `AmazonBook` dataset ([#7603](https://github.com/pyg-team/pytorch_geometric/pull/7603))
+- Added a tutorial on hierarchical neighborhood sampling ([#7594](https://github.com/pyg-team/pytorch_geometric/pull/7594))
+- Enabled different attention modes in `HypergraphConv` via the `attention_mode` argument ([#7601](https://github.com/pyg-team/pytorch_geometric/pull/7601))
+- Added the `FilterEdges` graph coarsening operator ([#7361](https://github.com/pyg-team/pytorch_geometric/pull/7361))
+- Added the `DirGNN` model for learning on directed graphs ([#7458](https://github.com/pyg-team/pytorch_geometric/pull/7458))
+- Allow GPU tensors as input to `NodeLoader` and `LinkLoader` ([#7572](https://github.com/pyg-team/pytorch_geometric/pull/7572))
+- Added an `embedding_device` option to allow for GPU inference in `BasicGNN` ([#7548](https://github.com/pyg-team/pytorch_geometric/pull/7548), [#7829](https://github.com/pyg-team/pytorch_geometric/pull/7829))
+- Added `Performer` to `GPSConv` and remove `attn_dropout` argument from `GPSConv` ([#7465](https://github.com/pyg-team/pytorch_geometric/pull/7465))
+- Enabled `LinkNeighborLoader` to return number of sampled nodes and edges per hop ([#7516](https://github.com/pyg-team/pytorch_geometric/pull/7516))
+- Added the `HM` personalized fashion recommendation dataset ([#7515](https://github.com/pyg-team/pytorch_geometric/pull/7515))
+- Added the `GraphMixer` model ([#7501](https://github.com/pyg-team/pytorch_geometric/pull/7501), [#7459](https://github.com/pyg-team/pytorch_geometric/pull/7459))
+- Added the `disable_dynamic_shape` experimental flag ([#7246](https://github.com/pyg-team/pytorch_geometric/pull/7246), [#7534](https://github.com/pyg-team/pytorch_geometric/pull/7534))
+- Added the `MovieLens-1M` heterogeneous dataset ([#7479](https://github.com/pyg-team/pytorch_geometric/pull/7479))
+- Added a CPU-based and GPU-based `map_index` implementation ([#7493](https://github.com/pyg-team/pytorch_geometric/pull/7493), [#7764](https://github.com/pyg-team/pytorch_geometric/pull/7764), [#7765](https://github.com/pyg-team/pytorch_geometric/pull/7765))
+- Added the `AmazonBook` heterogeneous dataset ([#7483](https://github.com/pyg-team/pytorch_geometric/pull/7483))
+- Added hierarchical heterogeneous GraphSAGE example on OGB-MAG ([#7425](https://github.com/pyg-team/pytorch_geometric/pull/7425))
+- Added the `torch_geometric.distributed` package ([#7451](https://github.com/pyg-team/pytorch_geometric/pull/7451), [#7452](https://github.com/pyg-team/pytorch_geometric/pull/7452), [#7482](https://github.com/pyg-team/pytorch_geometric/pull/7482), [#7502](https://github.com/pyg-team/pytorch_geometric/pull/7502), [#7628](https://github.com/pyg-team/pytorch_geometric/pull/7628), [#7671](https://github.com/pyg-team/pytorch_geometric/pull/7671), [#7846](https://github.com/pyg-team/pytorch_geometric/pull/7846), [#7715](https://github.com/pyg-team/pytorch_geometric/pull/7715), [#7974](https://github.com/pyg-team/pytorch_geometric/pull/7974))
+- Added the `GDELTLite` dataset ([#7442](https://github.com/pyg-team/pytorch_geometric/pull/7442))
+- Added the `approx_knn` function for approximated nearest neighbor search ([#7421](https://github.com/pyg-team/pytorch_geometric/pull/7421))
+- Added the `IGMCDataset` ([#7441](https://github.com/pyg-team/pytorch_geometric/pull/7441))
+- Added a sparse `cross_entropy` implementation ([#7447](https://github.com/pyg-team/pytorch_geometric/pull/7447), [#7466](https://github.com/pyg-team/pytorch_geometric/pull/7466))
+- Added the `MovieLens-100K` heterogeneous dataset ([#7398](https://github.com/pyg-team/pytorch_geometric/pull/7398))
+- Added the `PMLP` model and an example ([#7370](https://github.com/pyg-team/pytorch_geometric/pull/7370), [#7543](https://github.com/pyg-team/pytorch_geometric/pull/7543))
+- Added padding capabilities to `HeteroData.to_homogeneous()` in case feature dimensionalities do not match ([#7374](https://github.com/pyg-team/pytorch_geometric/pull/7374))
+- Added an optional `batch_size` argument to `fps`, `knn`, `knn_graph`, `radius` and `radius_graph` ([#7368](https://github.com/pyg-team/pytorch_geometric/pull/7368))
+- Added `PrefetchLoader` capabilities ([#7376](https://github.com/pyg-team/pytorch_geometric/pull/7376), [#7378](https://github.com/pyg-team/pytorch_geometric/pull/7378), [#7383](https://github.com/pyg-team/pytorch_geometric/pull/7383))
+- Added an example for hierarchical sampling ([#7244](https://github.com/pyg-team/pytorch_geometric/pull/7244))
+- Added Kùzu remote backend examples ([#7298](https://github.com/pyg-team/pytorch_geometric/pull/7298))
+- Added an optional `add_pad_mask` argument to the `Pad` transform ([#7339](https://github.com/pyg-team/pytorch_geometric/pull/7339))
+- Added `keep_inter_cluster_edges` option to `ClusterData` to support inter-subgraph edge connections when doing graph partitioning ([#7326](https://github.com/pyg-team/pytorch_geometric/pull/7326))
+- Unify graph pooling framework ([#7308](https://github.com/pyg-team/pytorch_geometric/pull/7308), [#7625](https://github.com/pyg-team/pytorch_geometric/pull/7625))
+- Added support for tuples as keys in `ModuleDict`/`ParameterDict` ([#7294](https://github.com/pyg-team/pytorch_geometric/pull/7294))
+- Added `NodePropertySplit` transform for creating node-level splits using structural node properties ([#6894](https://github.com/pyg-team/pytorch_geometric/pull/6894))
+- Added an option to preserve directed graphs in `CitationFull` datasets ([#7275](https://github.com/pyg-team/pytorch_geometric/pull/7275))
+- Added support for `torch.sparse.Tensor` in `DataLoader` ([#7252](https://github.com/pyg-team/pytorch_geometric/pull/7252))
+- Added `save` and `load` methods to `InMemoryDataset` ([#7250](https://github.com/pyg-team/pytorch_geometric/pull/7250), [#7413](https://github.com/pyg-team/pytorch_geometric/pull/7413))
+- Added an example for heterogeneous GNN explanation via `CaptumExplainer` ([#7096](https://github.com/pyg-team/pytorch_geometric/pull/7096))
+- Added `visualize_feature_importance` functionality to `HeteroExplanation` ([#7096](https://github.com/pyg-team/pytorch_geometric/pull/7096))
+- Added an `AddRemainingSelfLoops` transform ([#7192](https://github.com/pyg-team/pytorch_geometric/pull/7192))
+- Added `optimizer_resolver` ([#7209](https://github.com/pyg-team/pytorch_geometric/pull/7209))
+- Added `type_ptr` argument to `HeteroLayerNorm` ([#7208](https://github.com/pyg-team/pytorch_geometric/pull/7208))
+- Added an option to benchmark scripts to write PyTorch profiler results to CSV ([#7114](https://github.com/pyg-team/pytorch_geometric/pull/7114))
+- Added subgraph type sampling option with bidirectional edge support ([#7199](https://github.com/pyg-team/pytorch_geometric/pull/7199), [#7200](https://github.com/pyg-team/pytorch_geometric/pull/7200))
+- Added support for `"any"`-reductions in `scatter` ([#7198](https://github.com/pyg-team/pytorch_geometric/pull/7198))
+- Added manual sampling interface to `NodeLoader` and `LinkLoader` ([#7197](https://github.com/pyg-team/pytorch_geometric/pull/7197))
+- Extending `torch.sparse` support ([#7155](https://github.com/pyg-team/pytorch_geometric/pull/7155))
+- Added edge weight support to `LightGCN` ([#7157](https://github.com/pyg-team/pytorch_geometric/pull/7157))
+- Added `SparseTensor` support to `trim_to_layer` function ([#7089](https://github.com/pyg-team/pytorch_geometric/pull/7089))
+- Added instructions for ROCm build wheels ([#7143](https://github.com/pyg-team/pytorch_geometric/pull/7143))
+- Added a `ComposeFilters` class to compose `pre_filter` functions in `Dataset` ([#7097](https://github.com/pyg-team/pytorch_geometric/pull/7097))
+- Added a time-step aware variant of the `EllipticBitcoinDataset` called `EllipticBitcoinTemporalDataset` ([#7011](https://github.com/pyg-team/pytorch_geometric/pull/7011))
+- Added `to_dgl` and `from_dgl` conversion functions ([#7053](https://github.com/pyg-team/pytorch_geometric/pull/7053))
+- Added support for `torch.jit.script` within `MessagePassing` layers without `torch_sparse` being installed ([#7061](https://github.com/pyg-team/pytorch_geometric/pull/7061), [#7062](https://github.com/pyg-team/pytorch_geometric/pull/7062))
+- Added unbatching logic for `torch.sparse` tensors ([#7037](https://github.com/pyg-team/pytorch_geometric/pull/7037))
+- Added the `RotatE` KGE model ([#7026](https://github.com/pyg-team/pytorch_geometric/pull/7026))
+- Added support for Apple silicon GPU acceleration in some main examples ([#7770](https://github.com/pyg-team/pytorch_geometric/pull/7770), [#7711](https://github.com/pyg-team/pytorch_geometric/pull/7711), [#7784](https://github.com/pyg-team/pytorch_geometric/pull/7784), [#7785](https://github.com/pyg-team/pytorch_geometric/pull/7785))
+
+### Changed
+
+- Fixed `HeteroConv` for layers that have a non-default argument order, *e.g.*, `GCN2Conv` ([#8166](https://github.com/pyg-team/pytorch_geometric/pull/8166))
+- Handle reserved keywords as keys in `ModuleDict` and `ParameterDict` ([#8163](https://github.com/pyg-team/pytorch_geometric/pull/8163))
+- Updated the examples and tutorials to account for `torch.compile(dynamic=True)` in PyTorch 2.1.0 ([#8145](https://github.com/pyg-team/pytorch_geometric/pull/8145))
+- Enabled dense eigenvalue computation in `AddLaplacianEigenvectorPE` for small-scale graphs ([#8143](https://github.com/pyg-team/pytorch_geometric/pull/8143))
+- Fix `DynamicBatchSampler.__len__` to raise an error in case `num_steps` is undefined ([#8137](https://github.com/pyg-team/pytorch_geometric/pull/8137))
+- Enabled pickling of `DimeNet` models ([#8019](https://github.com/pyg-team/pytorch_geometric/pull/8019))
+- Changed the `trim_to_layer` function to filter out non-reachable node and edge types when operating on heterogeneous graphs ([#7942](https://github.com/pyg-team/pytorch_geometric/pull/7942))
+- Accelerated and simplified `top_k` computation in `TopKPooling` ([#7737](https://github.com/pyg-team/pytorch_geometric/pull/7737))
+- Updated `GIN` implementation in kernel benchmarks to have sequential batchnorms ([#7955](https://github.com/pyg-team/pytorch_geometric/pull/7955))
+- Fixed bugs in benchmarks caused by a lack of the device conditions for CPU and unexpected `cache` argument in heterogeneous models ([#7956](https://github.com/pyg-team/pytorch_geometric/pull/7956))
+- Fixed a bug in which `batch.e_id` was not correctly computed on unsorted graph inputs ([#7953](https://github.com/pyg-team/pytorch_geometric/pull/7953))
+- Fixed `from_networkx` conversion from `nx.stochastic_block_model` graphs ([#7941](https://github.com/pyg-team/pytorch_geometric/pull/7941))
+- Fixed the usage of `bias_initializer` in `HeteroLinear` ([#7923](https://github.com/pyg-team/pytorch_geometric/pull/7923))
+- Fixed broken links in `HGBDataset` ([#7907](https://github.com/pyg-team/pytorch_geometric/pull/7907))
+- Fixed an issue where `SetTransformerAggregation` produced `NaN` values for isolated nodes ([#7902](https://github.com/pyg-team/pytorch_geometric/pull/7902))
+- Fixed `model_summary` on modules with uninitialized parameters ([#7884](https://github.com/pyg-team/pytorch_geometric/pull/7884))
+- Updated `QM9` data pre-processing to include the SMILES string ([#7867](https://github.com/pyg-team/pytorch_geometric/pull/7867))
+- Fixed tracing of `add_self_loops` for a dynamic number of nodes ([#7330](https://github.com/pyg-team/pytorch_geometric/pull/7330))
+- Fixed device issue in `PNAConv.get_degree_histogram` ([#7830](https://github.com/pyg-team/pytorch_geometric/pull/7830))
+- Fixed the shape of `edge_label_time` when using temporal sampling on homogeneous graphs ([#7807](https://github.com/pyg-team/pytorch_geometric/pull/7807))
+- Moved `torch_geometric.contrib.explain.GraphMaskExplainer` to `torch_geometric.explain.algorithm.GraphMaskExplainer` ([#7779](https://github.com/pyg-team/pytorch_geometric/pull/7779))
+- Made `FieldStatus` enum picklable to avoid `PicklingError` in a multi-process setting ([#7808](https://github.com/pyg-team/pytorch_geometric/pull/7808))
+- Fixed `edge_label_index` computation in `LinkNeighborLoader` for the homogeneous+`disjoint` mode ([#7791](https://github.com/pyg-team/pytorch_geometric/pull/7791))
+- Fixed `CaptumExplainer` for `binary_classification` tasks ([#7787](https://github.com/pyg-team/pytorch_geometric/pull/7787))
+- Warn user when using the `training` flag in `to_hetero` modules ([#7772](https://github.com/pyg-team/pytorch_geometric/pull/7772))
+- Unchained exceptions raised when accessing non-existent data attributes for better readability ([#7734](https://github.com/pyg-team/pytorch_geometric/pull/7734))
+- Raise error when collecting non-existing attributes in `HeteroData` ([#7714](https://github.com/pyg-team/pytorch_geometric/pull/7714))
+- Renamed `dest` argument to `dst` in `utils.geodesic_distance` ([#7708](https://github.com/pyg-team/pytorch_geometric/pull/7708))
+- Changed `add_random_edge` to only add true negative edges ([#7654](https://github.com/pyg-team/pytorch_geometric/pull/7654))
+- Allowed the usage of `BasicGNN` models in `DeepGraphInfomax` ([#7648](https://github.com/pyg-team/pytorch_geometric/pull/7648))
+- Breaking Change: Made `Data.keys` a method rather than a property ([#7629](https://github.com/pyg-team/pytorch_geometric/pull/7629))
+- Added a `num_edges` parameter to the forward method of `HypergraphConv` ([#7560](https://github.com/pyg-team/pytorch_geometric/pull/7560))
+- Fixed `get_mesh_laplacian` for `normalization="sym"` ([#7544](https://github.com/pyg-team/pytorch_geometric/pull/7544))
+- Use `dim_size` to initialize output size of the `EquilibriumAggregation` layer ([#7530](https://github.com/pyg-team/pytorch_geometric/pull/7530))
+- Added a `max_num_elements` parameter to the forward method of `GraphMultisetTransformer`, `GRUAggregation`, `LSTMAggregation` and `SetTransformerAggregation` ([#7529](https://github.com/pyg-team/pytorch_geometric/pull/7529))
+- Fixed empty edge indices handling in `SparseTensor` ([#7519](https://github.com/pyg-team/pytorch_geometric/pull/7519))
+- Move the `scaler` tensor in `GeneralConv` to the correct device ([#7484](https://github.com/pyg-team/pytorch_geometric/pull/7484))
+- Fixed `HeteroLinear` bug when used via mixed precision ([#7473](https://github.com/pyg-team/pytorch_geometric/pull/7473))
+- All transforms are now immutable, i.e., they perform a shallow-copy of the data and therefore no longer modify data in-place ([#7429](https://github.com/pyg-team/pytorch_geometric/pull/7429))
+- Set `output_size` in the `repeat_interleave` operation in `QuantileAggregation` ([#7426](https://github.com/pyg-team/pytorch_geometric/pull/7426))
+- Fixed gradient computation of edge weights in `utils.spmm` ([#7428](https://github.com/pyg-team/pytorch_geometric/pull/7428))
+- Re-factored `ClusterLoader` to integrate `pyg-lib` METIS routine ([#7416](https://github.com/pyg-team/pytorch_geometric/pull/7416))
+- Fixed an index-out-of-range bug in `QuantileAggregation` when `dim_size` is passed ([#7407](https://github.com/pyg-team/pytorch_geometric/pull/7407))
+- The `filter_per_worker` option will not get automatically inferred by default based on the device of the underlying data ([#7399](https://github.com/pyg-team/pytorch_geometric/pull/7399))
+- Fixed a bug in `LightGCN.recommendation_loss()` to only use the embeddings of the nodes involved in the current mini-batch ([#7384](https://github.com/pyg-team/pytorch_geometric/pull/7384))
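One entry above deserves special attention when upgrading: the breaking change that turned `Data.keys` into a method ([#7629]). A minimal sketch of the required call-site update (assuming the bundled PyG 2.4.0):

```python
import torch
from torch_geometric.data import Data

data = Data(x=torch.randn(3, 4),
            edge_index=torch.tensor([[0, 1], [1, 2]]))

print(data.keys())  # PyG >= 2.4: `keys` must now be called as a method
# print(data.keys)  # PyG <= 2.3 property access; in 2.4 this yields the bound
#                   # method itself, not the list of attribute names
```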
+- Added an optional `max_num_elements` argument to `SortAggregation` ([#7367](https://github.com/pyg-team/pytorch_geometric/pull/7367))
+- Added the option to pass `fill_value` as a `torch.tensor` to `utils.to_dense_batch` ([#7367](https://github.com/pyg-team/pytorch_geometric/pull/7367))
+- Fixed a bug in which inputs were modified in-place in `to_hetero_with_bases` ([#7363](https://github.com/pyg-team/pytorch_geometric/pull/7363))
+- Do not load `node_default` and `edge_default` attributes in `from_networkx` ([#7348](https://github.com/pyg-team/pytorch_geometric/pull/7348))
+- Updated examples to use `NeighborLoader` instead of `NeighborSampler` ([#7152](https://github.com/pyg-team/pytorch_geometric/pull/7152))
+- Fixed `HGTConv` utility function `_construct_src_node_feat` ([#7194](https://github.com/pyg-team/pytorch_geometric/pull/7194))
+- Extend dataset summary to create stats for each node/edge type ([#7203](https://github.com/pyg-team/pytorch_geometric/pull/7203))
+- Added an optional `batch_size` argument to `avg_pool_x` and `max_pool_x` ([#7216](https://github.com/pyg-team/pytorch_geometric/pull/7216))
+- Fixed `subgraph` on unordered inputs ([#7187](https://github.com/pyg-team/pytorch_geometric/pull/7187))
+- Allow missing node types in `HeteroDictLinear` ([#7185](https://github.com/pyg-team/pytorch_geometric/pull/7185))
+- Optimized `from_networkx` memory footprint by reducing unnecessary copies ([#7119](https://github.com/pyg-team/pytorch_geometric/pull/7119))
+- Added an optional `batch_size` argument to `LayerNorm`, `GraphNorm`, `InstanceNorm`, `GraphSizeNorm` and `PairNorm` ([#7135](https://github.com/pyg-team/pytorch_geometric/pull/7135))
+- Improved code coverage ([#7093](https://github.com/pyg-team/pytorch_geometric/pull/7093), [#7195](https://github.com/pyg-team/pytorch_geometric/pull/7195))
+- Fix `numpy` incompatibility when reading files for `Planetoid` datasets ([#7141](https://github.com/pyg-team/pytorch_geometric/pull/7141))
+- Added support for `Data.num_edges` for native `torch.sparse.Tensor` adjacency matrices ([#7104](https://github.com/pyg-team/pytorch_geometric/pull/7104))
+- Fixed crash of heterogeneous data loaders if node or edge types are missing ([#7060](https://github.com/pyg-team/pytorch_geometric/pull/7060), [#7087](https://github.com/pyg-team/pytorch_geometric/pull/7087))
+- Accelerated attention-based `MultiAggregation` ([#7077](https://github.com/pyg-team/pytorch_geometric/pull/7077))
+- Edges in `HeterophilousGraphDataset` are now undirected by default ([#7065](https://github.com/pyg-team/pytorch_geometric/pull/7065))
+- Fixed a bug in `FastHGTConv` that computed values via parameters used to compute the keys ([#7050](https://github.com/pyg-team/pytorch_geometric/pull/7050))
+- Accelerated sparse tensor conversion routines ([#7042](https://github.com/pyg-team/pytorch_geometric/pull/7042), [#7043](https://github.com/pyg-team/pytorch_geometric/pull/7043))
+- Change `torch_sparse.SparseTensor` logic to utilize `torch.sparse_csr` instead ([#7041](https://github.com/pyg-team/pytorch_geometric/pull/7041))
+- Added an optional `batch_size` and `max_num_nodes` arguments to `MemPooling` layer ([#7239](https://github.com/pyg-team/pytorch_geometric/pull/7239))
+- Fixed training issues of the GraphGPS example ([#7377](https://github.com/pyg-team/pytorch_geometric/pull/7377))
+- Allowed `CaptumExplainer` to be called multiple times in a row ([#7391](https://github.com/pyg-team/pytorch_geometric/pull/7391))
+
+### Removed
+
+- Dropped Python 3.7 support
([#7939](https://github.com/pyg-team/pytorch_geometric/pull/7939)) +- Removed `layer_type` argument in `contrib.explain.GraphMaskExplainer` ([#7445](https://github.com/pyg-team/pytorch_geometric/pull/7445)) +- Replaced `FastHGTConv` with `HGTConv` ([#7117](https://github.com/pyg-team/pytorch_geometric/pull/7117)) + +## [2.3.0] - 2023-03-23 + +### Added + +- Added a memory-efficient `utils.one_hot` implementation ([#7005](https://github.com/pyg-team/pytorch_geometric/pull/7005)) +- Added `HeteroDictLinear` and an optimized `FastHGTConv` module ([#6178](https://github.com/pyg-team/pytorch_geometric/pull/6178), [#6998](https://github.com/pyg-team/pytorch_geometric/pull/6998)) +- Added the `DenseGATConv` module ([#6928](https://github.com/pyg-team/pytorch_geometric/pull/6928)) +- Added `trim_to_layer` utility function for more efficient `NeighborLoader` use-cases ([#6661](https://github.com/pyg-team/pytorch_geometric/pull/6661)) +- Added the `DistMult` KGE model ([#6958](https://github.com/pyg-team/pytorch_geometric/pull/6958)) +- Added `HeteroData.set_value_dict` functionality ([#6961](https://github.com/pyg-team/pytorch_geometric/pull/6961), [#6974](https://github.com/pyg-team/pytorch_geometric/pull/6974)) +- Added PyTorch >= 2.0 support ([#6934](https://github.com/pyg-team/pytorch_geometric/pull/6934), [#7000](https://github.com/pyg-team/pytorch_geometric/pull/7000)) +- Added PyTorch Lightning >= 2.0 support ([#6929](https://github.com/pyg-team/pytorch_geometric/pull/6929)) +- Added the `ComplEx` KGE model ([#6898](https://github.com/pyg-team/pytorch_geometric/pull/6898)) +- Added option to write benchmark results to csv ([#6888](https://github.com/pyg-team/pytorch_geometric/pull/6888)) +- Added `HeteroLayerNorm` and `HeteroBatchNorm` layers ([#6838](https://github.com/pyg-team/pytorch_geometric/pull/6838)) +- Added the `HeterophilousGraphDataset` suite ([#6846](https://github.com/pyg-team/pytorch_geometric/pull/6846)) +- Added support for sparse tensor in full batch mode inference benchmark ([#6843](https://github.com/pyg-team/pytorch_geometric/pull/6843)) +- Enabled `NeighborLoader` to return number of sampled nodes and edges per hop ([#6834](https://github.com/pyg-team/pytorch_geometric/pull/6834)) +- Added `ZipLoader` to execute multiple `NodeLoader` or `LinkLoader` instances ([#6829](https://github.com/pyg-team/pytorch_geometric/issues/6829)) +- Added common `utils.select` and `utils.narrow` functionality to support filtering of both tensors and lists ([#6162](https://github.com/pyg-team/pytorch_geometric/issues/6162)) +- Support `normalization` customization in `get_mesh_laplacian` ([#6790](https://github.com/pyg-team/pytorch_geometric/issues/6790)) +- Added the `TemporalEncoding` module ([#6785](https://github.com/pyg-team/pytorch_geometric/pull/6785)) +- Added CPU-optimized `spmm_reduce` functionality via CSR format ([#6699](https://github.com/pyg-team/pytorch_geometric/pull/6699), [#6759](https://github.com/pyg-team/pytorch_geometric/pull/6759)) +- Added support for the revised version of the `MD17` dataset ([#6734](https://github.com/pyg-team/pytorch_geometric/pull/6734)) +- Added TorchScript support to the `RECT_L` model ([#6727](https://github.com/pyg-team/pytorch_geometric/pull/6727)) +- Added TorchScript support to the `Node2Vec` model ([#6726](https://github.com/pyg-team/pytorch_geometric/pull/6726)) +- Added `utils.to_edge_index` to convert sparse tensors to edge indices and edge attributes ([#6728](https://github.com/pyg-team/pytorch_geometric/issues/6728)) +- Fixed 
expected data format in `PolBlogs` dataset ([#6714](https://github.com/pyg-team/pytorch_geometric/issues/6714)) +- Added `SimpleConv` to perform non-trainable propagation ([#6718](https://github.com/pyg-team/pytorch_geometric/pull/6718)) +- Added a `RemoveDuplicatedEdges` transform ([#6709](https://github.com/pyg-team/pytorch_geometric/pull/6709)) +- Added TorchScript support to the `LINKX` model ([#6712](https://github.com/pyg-team/pytorch_geometric/pull/6712)) +- Added `torch.jit` examples for `example/film.py` and `example/gcn.py` ([#6692](https://github.com/pyg-team/pytorch_geometric/pull/6692)) +- Added `Pad` transform ([#5940](https://github.com/pyg-team/pytorch_geometric/pull/5940), [#6697](https://github.com/pyg-team/pytorch_geometric/pull/6697), [#6731](https://github.com/pyg-team/pytorch_geometric/pull/6731), [#6758](https://github.com/pyg-team/pytorch_geometric/pull/6758)) +- Added full batch mode to the inference benchmark ([#6631](https://github.com/pyg-team/pytorch_geometric/pull/6631)) +- Added `cat` aggregation type to the `HeteroConv` class so that features can be concatenated during grouping ([#6634](https://github.com/pyg-team/pytorch_geometric/pull/6634)) +- Added `torch.compile` support and benchmark study ([#6610](https://github.com/pyg-team/pytorch_geometric/pull/6610), [#6952](https://github.com/pyg-team/pytorch_geometric/pull/6952), [#6953](https://github.com/pyg-team/pytorch_geometric/pull/6953), [#6980](https://github.com/pyg-team/pytorch_geometric/pull/6980), [#6983](https://github.com/pyg-team/pytorch_geometric/pull/6983), [#6984](https://github.com/pyg-team/pytorch_geometric/pull/6984), [#6985](https://github.com/pyg-team/pytorch_geometric/pull/6985), [#6986](https://github.com/pyg-team/pytorch_geometric/pull/6986), [#6989](https://github.com/pyg-team/pytorch_geometric/pull/6989), [#7002](https://github.com/pyg-team/pytorch_geometric/pull/7002)) +- Added the `AntiSymmetricConv` layer ([#6577](https://github.com/pyg-team/pytorch_geometric/pull/6577)) +- Added a mixin for Huggingface model hub integration ([#5930](https://github.com/pyg-team/pytorch_geometric/pull/5930), [#6591](https://github.com/pyg-team/pytorch_geometric/pull/6591)) +- Added support for accelerated GNN layers in `nn.conv.cugraph` via `cugraph-ops` ([#6278](https://github.com/pyg-team/pytorch_geometric/pull/6278), [#6388](https://github.com/pyg-team/pytorch_geometric/pull/6388), [#6412](https://github.com/pyg-team/pytorch_geometric/pull/6412)) +- Added accelerated `index_sort` function from `pyg-lib` for faster sorting ([#6554](https://github.com/pyg-team/pytorch_geometric/pull/6554)) +- Fix incorrect device in `EquilibriumAggregation` ([#6560](https://github.com/pyg-team/pytorch_geometric/pull/6560)) +- Added bipartite graph support in `dense_to_sparse()` ([#6546](https://github.com/pyg-team/pytorch_geometric/pull/6546)) +- Add CPU affinity support for more data loaders ([#6534](https://github.com/pyg-team/pytorch_geometric/pull/6534), [#6922](https://github.com/pyg-team/pytorch_geometric/pull/6922)) +- Added the `BAMultiShapesDataset` ([#6541](https://github.com/pyg-team/pytorch_geometric/pull/6541)) +- Added the interfaces of a graph pooling framework ([#6540](https://github.com/pyg-team/pytorch_geometric/pull/6540)) +- Added automatic `n_id` and `e_id` attributes to mini-batches produced by `NodeLoader` and `LinkLoader` ([#6524](https://github.com/pyg-team/pytorch_geometric/pull/6524))
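As a quick illustration of the `torch.compile` support listed above, a minimal sketch (assuming PyTorch >= 2.0; `torch_geometric.compile` wraps `torch.compile` for GNN models in this release series, and the model configuration and tensor shapes here are toy assumptions):

```python
import torch
import torch_geometric
from torch_geometric.nn import GCN

model = GCN(in_channels=16, hidden_channels=32, num_layers=2, out_channels=7)

x = torch.randn(100, 16)                      # 100 nodes with 16 features each
edge_index = torch.randint(0, 100, (2, 500))  # 500 random edges

compiled = torch_geometric.compile(model)     # removes PyG-side graph breaks first
out = compiled(x, edge_index)                 # logits of shape [100, 7]
```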
+- Added `PGMExplainer` to `torch_geometric.contrib` ([#6149](https://github.com/pyg-team/pytorch_geometric/pull/6149), [#6588](https://github.com/pyg-team/pytorch_geometric/pull/6588), [#6589](https://github.com/pyg-team/pytorch_geometric/pull/6589)) +- Added a `NumNeighbors` helper class for specifying the number of neighbors when sampling ([#6501](https://github.com/pyg-team/pytorch_geometric/pull/6501), [#6505](https://github.com/pyg-team/pytorch_geometric/pull/6505), [#6690](https://github.com/pyg-team/pytorch_geometric/pull/6690)) +- Added caching to `is_node_attr()` and `is_edge_attr()` calls ([#6492](https://github.com/pyg-team/pytorch_geometric/pull/6492)) +- Added `ToHeteroLinear` and `ToHeteroMessagePassing` modules to accelerate `to_hetero` functionality ([#5992](https://github.com/pyg-team/pytorch_geometric/pull/5992), [#6456](https://github.com/pyg-team/pytorch_geometric/pull/6456)) +- Added `GraphMaskExplainer` ([#6284](https://github.com/pyg-team/pytorch_geometric/pull/6284)) +- Added the `GRBCD` and `PRBCD` adversarial attack models ([#5972](https://github.com/pyg-team/pytorch_geometric/pull/5972)) +- Added `dropout` option to `SetTransformer` and `GraphMultisetTransformer` ([#6484](https://github.com/pyg-team/pytorch_geometric/pull/6484)) +- Added option to customize loader arguments for evaluation in `LightningNodeData` and `LightningLinkData` ([#6450](https://github.com/pyg-team/pytorch_geometric/pull/6450), [#6456](https://github.com/pyg-team/pytorch_geometric/pull/6456)) +- Added option to customize `num_neighbors` in `NeighborSampler` after instantiation ([#6446](https://github.com/pyg-team/pytorch_geometric/pull/6446)) +- Added the `Taobao` dataset and a corresponding example for it ([#6144](https://github.com/pyg-team/pytorch_geometric/pull/6144)) +- Added `pyproject.toml` ([#6431](https://github.com/pyg-team/pytorch_geometric/pull/6431)) +- Added the `torch_geometric.contrib` sub-package ([#6422](https://github.com/pyg-team/pytorch_geometric/pull/6422)) +- Warn on using latest documentation ([#6418](https://github.com/pyg-team/pytorch_geometric/pull/6418)) +- Added basic `pyright` type checker support ([#6415](https://github.com/pyg-team/pytorch_geometric/pull/6415)) +- Added a new external resource for link prediction ([#6396](https://github.com/pyg-team/pytorch_geometric/pull/6396)) +- Added `CaptumExplainer` ([#6383](https://github.com/pyg-team/pytorch_geometric/pull/6383), [#6387](https://github.com/pyg-team/pytorch_geometric/pull/6387), [#6433](https://github.com/pyg-team/pytorch_geometric/pull/6433), [#6487](https://github.com/pyg-team/pytorch_geometric/pull/6487), [#6966](https://github.com/pyg-team/pytorch_geometric/pull/6966)) +- Added support for custom `HeteroData` mini-batch class in remote backends ([#6377](https://github.com/pyg-team/pytorch_geometric/pull/6377)) +- Added the `GNNFF` model ([#5866](https://github.com/pyg-team/pytorch_geometric/pull/5866)) +- Added `MLPAggregation`, `SetTransformerAggregation`, `GRUAggregation`, and `DeepSetsAggregation` as adaptive readout functions ([#6301](https://github.com/pyg-team/pytorch_geometric/pull/6301), [#6336](https://github.com/pyg-team/pytorch_geometric/pull/6336), [#6338](https://github.com/pyg-team/pytorch_geometric/pull/6338)) +- Added `Dataset.to_datapipe` for converting PyG datasets into a torchdata `DataPipe` ([#6141](https://github.com/pyg-team/pytorch_geometric/pull/6141)) +- Added `to_nested_tensor` and `from_nested_tensor` functionality ([#6329](https://github.com/pyg-team/pytorch_geometric/pull/6329), 
[#6330](https://github.com/pyg-team/pytorch_geometric/pull/6330), [#6331](https://github.com/pyg-team/pytorch_geometric/pull/6331), [#6332](https://github.com/pyg-team/pytorch_geometric/pull/6332)) +- Added the `GPSConv` Graph Transformer layer and example ([#6326](https://github.com/pyg-team/pytorch_geometric/pull/6326), [#6327](https://github.com/pyg-team/pytorch_geometric/pull/6327)) +- Added `networkit` conversion utilities ([#6321](https://github.com/pyg-team/pytorch_geometric/pull/6321)) +- Added global dataset attribute access via `dataset.{attr_name}` ([#6319](https://github.com/pyg-team/pytorch_geometric/pull/6319)) +- Added the `TransE` KGE model and example ([#6314](https://github.com/pyg-team/pytorch_geometric/pull/6314)) +- Added the Freebase `FB15k_237` dataset ([#3204](https://github.com/pyg-team/pytorch_geometric/pull/3204)) +- Added `Data.update()` and `HeteroData.update()` functionality ([#6313](https://github.com/pyg-team/pytorch_geometric/pull/6313)) +- Added `PGExplainer` ([#6204](https://github.com/pyg-team/pytorch_geometric/pull/6204)) +- Added the `AirfRANS` dataset ([#6287](https://github.com/pyg-team/pytorch_geometric/pull/6287)) +- Added `AttentionExplainer` ([#6279](https://github.com/pyg-team/pytorch_geometric/pull/6279)) +- Added (un)faithfulness explainability metric ([#6090](https://github.com/pyg-team/pytorch_geometric/pull/6090)) +- Added fidelity explainability metric ([#6116](https://github.com/pyg-team/pytorch_geometric/pull/6116), [#6510](https://github.com/pyg-team/pytorch_geometric/pull/6510)) +- Added subgraph visualization of GNN explanations ([#6235](https://github.com/pyg-team/pytorch_geometric/pull/6235), [#6271](https://github.com/pyg-team/pytorch_geometric/pull/6271)) +- Added weighted negative sampling option in `LinkNeighborLoader` ([#6264](https://github.com/pyg-team/pytorch_geometric/pull/6264)) +- Added the `BA2MotifDataset` explainer dataset ([#6257](https://github.com/pyg-team/pytorch_geometric/pull/6257)) +- Added `CycleMotif` motif generator to generate `n`-node cycle-shaped motifs ([#6256](https://github.com/pyg-team/pytorch_geometric/pull/6256)) +- Added the `InfectionDataset` to evaluate explanations ([#6222](https://github.com/pyg-team/pytorch_geometric/pull/6222)) +- Added `characterization_score` and `fidelity_curve_auc` explainer metrics ([#6188](https://github.com/pyg-team/pytorch_geometric/pull/6188)) +- Added `get_message_passing_embeddings` ([#6201](https://github.com/pyg-team/pytorch_geometric/pull/6201)) +- Added the `PointGNNConv` layer ([#6194](https://github.com/pyg-team/pytorch_geometric/pull/6194)) +- Added `GridGraph` graph generator to generate grid graphs ([#6220](https://github.com/pyg-team/pytorch_geometric/pull/6220)) +- Added explainability metrics for when ground truth is available ([#6137](https://github.com/pyg-team/pytorch_geometric/pull/6137)) +- Added `visualize_feature_importance` to support node feature visualizations ([#6094](https://github.com/pyg-team/pytorch_geometric/pull/6094)) +- Added heterogeneous graph support to `Explanation` framework ([#6091](https://github.com/pyg-team/pytorch_geometric/pull/6091), [#6218](https://github.com/pyg-team/pytorch_geometric/pull/6218)) +- Added a `CustomMotif` motif generator ([#6179](https://github.com/pyg-team/pytorch_geometric/pull/6179)) +- Added `ERGraph` graph generator to generate Erdős-Rényi (ER) graphs ([#6073](https://github.com/pyg-team/pytorch_geometric/pull/6073))
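A minimal sketch of how the graph and motif generators above compose with the explainability benchmark dataset framework (the constructor arguments shown are illustrative assumptions):

```python
from torch_geometric.datasets import ExplainerDataset
from torch_geometric.datasets.graph_generator import BAGraph
from torch_geometric.datasets.motif_generator import HouseMotif

# Attach 80 house motifs to a 300-node Barabási-Albert base graph,
# replacing the deprecated `datasets.BAShapes` workflow.
dataset = ExplainerDataset(
    graph_generator=BAGraph(num_nodes=300, num_edges=5),
    motif_generator=HouseMotif(),
    num_motifs=80,
)
data = dataset[0]  # ground-truth masks land in `data.node_mask`/`data.edge_mask`
```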
+- Added `BAGraph` graph generator to generate Barabási-Albert graphs - the usage of `datasets.BAShapes` is now deprecated ([#6072](https://github.com/pyg-team/pytorch_geometric/pull/6072)) +- Added explainability benchmark dataset framework ([#6104](https://github.com/pyg-team/pytorch_geometric/pull/6104)) +- Added `seed_time` attribute to temporal `NodeLoader` outputs in case `input_time` is given ([#6196](https://github.com/pyg-team/pytorch_geometric/pull/6196)) +- Added `Data.edge_subgraph` and `HeteroData.edge_subgraph` functionalities ([#6193](https://github.com/pyg-team/pytorch_geometric/pull/6193)) +- Added `input_time` option to `LightningNodeData` and `transform_sampler_output` to `NodeLoader` and `LinkLoader` ([#6187](https://github.com/pyg-team/pytorch_geometric/pull/6187)) +- Added `summary` for PyG/PyTorch models ([#5859](https://github.com/pyg-team/pytorch_geometric/pull/5859), [#6161](https://github.com/pyg-team/pytorch_geometric/pull/6161)) +- Started adding `torch.sparse` support to PyG ([#5906](https://github.com/pyg-team/pytorch_geometric/pull/5906), [#5944](https://github.com/pyg-team/pytorch_geometric/pull/5944), [#6003](https://github.com/pyg-team/pytorch_geometric/pull/6003), [#6033](https://github.com/pyg-team/pytorch_geometric/pull/6033), [#6514](https://github.com/pyg-team/pytorch_geometric/pull/6514), [#6532](https://github.com/pyg-team/pytorch_geometric/pull/6532), [#6748](https://github.com/pyg-team/pytorch_geometric/pull/6748), [#6847](https://github.com/pyg-team/pytorch_geometric/pull/6847), [#6868](https://github.com/pyg-team/pytorch_geometric/pull/6868), [#6874](https://github.com/pyg-team/pytorch_geometric/pull/6874), [#6897](https://github.com/pyg-team/pytorch_geometric/pull/6897), [#6930](https://github.com/pyg-team/pytorch_geometric/pull/6930), [#6932](https://github.com/pyg-team/pytorch_geometric/pull/6932), [#6936](https://github.com/pyg-team/pytorch_geometric/pull/6936), [#6937](https://github.com/pyg-team/pytorch_geometric/pull/6937), [#6939](https://github.com/pyg-team/pytorch_geometric/pull/6939), [#6947](https://github.com/pyg-team/pytorch_geometric/pull/6947), [#6950](https://github.com/pyg-team/pytorch_geometric/pull/6950), [#6951](https://github.com/pyg-team/pytorch_geometric/pull/6951), [#6957](https://github.com/pyg-team/pytorch_geometric/pull/6957)) +- Add `inputs_channels` back in training benchmark ([#6154](https://github.com/pyg-team/pytorch_geometric/pull/6154)) +- Added support for dropping nodes in `utils.to_dense_batch` in case `max_num_nodes` is smaller than the number of nodes ([#6124](https://github.com/pyg-team/pytorch_geometric/pull/6124)) +- Added the RandLA-Net architecture as an example ([#5117](https://github.com/pyg-team/pytorch_geometric/pull/5117)) + +### Changed + +- Migrate to `pyproject.toml` for packaging ([#6880](https://github.com/pyg-team/pytorch_geometric/pull/6880)) +- Drop internal usage of `__dunder__` names ([#6999](https://github.com/pyg-team/pytorch_geometric/issues/6999)) +- Changed the interface of `sort_edge_index`, `coalesce` and `to_undirected` to only return single `edge_index` information in case the `edge_attr` argument is not specified ([#6875](https://github.com/pyg-team/pytorch_geometric/issues/6875), [#6879](https://github.com/pyg-team/pytorch_geometric/issues/6879), [#6893](https://github.com/pyg-team/pytorch_geometric/issues/6893)) +- Fixed a bug in `to_hetero` when using an uninitialized submodule without implementing `reset_parameters` ([#6863](https://github.com/pyg-team/pytorch_geometric/pull/6863)) +- Fixed a bug in `get_mesh_laplacian` 
([#6790](https://github.com/pyg-team/pytorch_geometric/issues/6790)) +- Fixed a bug in which masks were not properly masked in `GNNExplainer` on link prediction tasks ([#6787](https://github.com/pyg-team/pytorch_geometric/pull/6787)) +- Allow the usage of `ChebConv` within `GNNExplainer` ([#6778](https://github.com/pyg-team/pytorch_geometric/pull/6778)) +- Allow setting the `EdgeStorage.num_edges` property ([#6710](https://github.com/pyg-team/pytorch_geometric/pull/6710)) +- Fixed a bug in `utils.bipartite_subgraph()` and updated docs of `HeteroData.subgraph()` ([#6654](https://github.com/pyg-team/pytorch_geometric/pull/6654)) +- Properly reset the `data_list` cache of an `InMemoryDataset` when accessing `dataset.data` ([#6685](https://github.com/pyg-team/pytorch_geometric/pull/6685)) +- Fixed a bug in `Data.subgraph()` and `HeteroData.subgraph()` ([#6613](https://github.com/pyg-team/pytorch_geometric/pull/6613)) +- Fixed a bug in `PNAConv` and `DegreeScalerAggregation` to correctly incorporate degree statistics of isolated nodes ([#6609](https://github.com/pyg-team/pytorch_geometric/pull/6609)) +- Improved code coverage ([#6523](https://github.com/pyg-team/pytorch_geometric/pull/6523), [#6538](https://github.com/pyg-team/pytorch_geometric/pull/6538), [#6555](https://github.com/pyg-team/pytorch_geometric/pull/6555), [#6558](https://github.com/pyg-team/pytorch_geometric/pull/6558), [#6568](https://github.com/pyg-team/pytorch_geometric/pull/6568), [#6573](https://github.com/pyg-team/pytorch_geometric/pull/6573), [#6578](https://github.com/pyg-team/pytorch_geometric/pull/6578), [#6597](https://github.com/pyg-team/pytorch_geometric/pull/6597), [#6600](https://github.com/pyg-team/pytorch_geometric/pull/6600), [#6618](https://github.com/pyg-team/pytorch_geometric/pull/6618), [#6619](https://github.com/pyg-team/pytorch_geometric/pull/6619), [#6621](https://github.com/pyg-team/pytorch_geometric/pull/6621), [#6623](https://github.com/pyg-team/pytorch_geometric/pull/6623), [#6637](https://github.com/pyg-team/pytorch_geometric/pull/6637), [#6638](https://github.com/pyg-team/pytorch_geometric/pull/6638), [#6640](https://github.com/pyg-team/pytorch_geometric/pull/6640), [#6645](https://github.com/pyg-team/pytorch_geometric/pull/6645), [#6648](https://github.com/pyg-team/pytorch_geometric/pull/6648), [#6647](https://github.com/pyg-team/pytorch_geometric/pull/6647), [#6653](https://github.com/pyg-team/pytorch_geometric/pull/6653), [#6657](https://github.com/pyg-team/pytorch_geometric/pull/6657), [#6662](https://github.com/pyg-team/pytorch_geometric/pull/6662), [#6664](https://github.com/pyg-team/pytorch_geometric/pull/6664), [#6667](https://github.com/pyg-team/pytorch_geometric/pull/6667), [#6668](https://github.com/pyg-team/pytorch_geometric/pull/6668), [#6669](https://github.com/pyg-team/pytorch_geometric/pull/6669), [#6670](https://github.com/pyg-team/pytorch_geometric/pull/6670), [#6671](https://github.com/pyg-team/pytorch_geometric/pull/6671), [#6673](https://github.com/pyg-team/pytorch_geometric/pull/6673), [#6675](https://github.com/pyg-team/pytorch_geometric/pull/6675), [#6676](https://github.com/pyg-team/pytorch_geometric/pull/6676), [#6677](https://github.com/pyg-team/pytorch_geometric/pull/6677), [#6678](https://github.com/pyg-team/pytorch_geometric/pull/6678), [#6681](https://github.com/pyg-team/pytorch_geometric/pull/6681), [#6683](https://github.com/pyg-team/pytorch_geometric/pull/6683), [#6703](https://github.com/pyg-team/pytorch_geometric/pull/6703), 
[#6720](https://github.com/pyg-team/pytorch_geometric/pull/6720), [#6735](https://github.com/pyg-team/pytorch_geometric/pull/6735), [#6736](https://github.com/pyg-team/pytorch_geometric/pull/6736), [#6763](https://github.com/pyg-team/pytorch_geometric/pull/6763), [#6781](https://github.com/pyg-team/pytorch_geometric/pull/6781), [#6797](https://github.com/pyg-team/pytorch_geometric/pull/6797), [#6799](https://github.com/pyg-team/pytorch_geometric/pull/6799), [#6824](https://github.com/pyg-team/pytorch_geometric/pull/6824), [#6858](https://github.com/pyg-team/pytorch_geometric/pull/6858)) +- Fixed a bug in which `data.to_heterogeneous()` filtered attributes in the wrong dimension ([#6522](https://github.com/pyg-team/pytorch_geometric/pull/6522)) +- Breaking Change: Temporal sampling will now also sample nodes with an equal timestamp to the seed time (requires `pyg-lib>0.1.0`) ([#6517](https://github.com/pyg-team/pytorch_geometric/pull/6517)) +- Changed `DataLoader` workers with affinity to start at `cpu0` ([#6512](https://github.com/pyg-team/pytorch_geometric/pull/6512)) +- Allow 1D input to `global_*_pool` functions ([#6504](https://github.com/pyg-team/pytorch_geometric/pull/6504)) +- Add information about dynamic shapes in `RGCNConv` ([#6482](https://github.com/pyg-team/pytorch_geometric/pull/6482)) +- Fixed the use of types removed in `numpy 1.24.0` ([#6495](https://github.com/pyg-team/pytorch_geometric/pull/6495)) +- Fixed keyword parameters in `examples/mnist_voxel_grid.py` ([#6478](https://github.com/pyg-team/pytorch_geometric/pull/6478)) +- Unified `LightningNodeData` and `LightningLinkData` code paths ([#6473](https://github.com/pyg-team/pytorch_geometric/pull/6473)) +- Allow indices with any integer type in `RGCNConv` ([#6463](https://github.com/pyg-team/pytorch_geometric/pull/6463)) +- Re-structured the documentation ([#6420](https://github.com/pyg-team/pytorch_geometric/pull/6420), [#6423](https://github.com/pyg-team/pytorch_geometric/pull/6423), [#6429](https://github.com/pyg-team/pytorch_geometric/pull/6429), [#6440](https://github.com/pyg-team/pytorch_geometric/pull/6440), [#6443](https://github.com/pyg-team/pytorch_geometric/pull/6443), [#6445](https://github.com/pyg-team/pytorch_geometric/pull/6445), [#6452](https://github.com/pyg-team/pytorch_geometric/pull/6452), [#6453](https://github.com/pyg-team/pytorch_geometric/pull/6453), [#6458](https://github.com/pyg-team/pytorch_geometric/pull/6458), [#6459](https://github.com/pyg-team/pytorch_geometric/pull/6459), [#6460](https://github.com/pyg-team/pytorch_geometric/pull/6460), [#6490](https://github.com/pyg-team/pytorch_geometric/pull/6490), [#6491](https://github.com/pyg-team/pytorch_geometric/pull/6491), [#6693](https://github.com/pyg-team/pytorch_geometric/pull/6693), [#6744](https://github.com/pyg-team/pytorch_geometric/pull/6744)) +- Fix the default arguments of `DataParallel` class ([#6376](https://github.com/pyg-team/pytorch_geometric/pull/6376)) +- Fix `ImbalancedSampler` on sliced `InMemoryDataset` ([#6374](https://github.com/pyg-team/pytorch_geometric/pull/6374)) +- Breaking Change: Changed the interface and implementation of `GraphMultisetTransformer` ([#6343](https://github.com/pyg-team/pytorch_geometric/pull/6343)) +- Fixed the approximate PPR variant in `transforms.GDC` to not crash on graphs with isolated nodes ([#6242](https://github.com/pyg-team/pytorch_geometric/pull/6242)) +- Added a warning when accessing `InMemoryDataset.data` ([#6318](https://github.com/pyg-team/pytorch_geometric/pull/6318))
+- Drop `SparseTensor` dependency in `GraphStore` ([#5517](https://github.com/pyg-team/pytorch_geometric/pull/5517)) +- Replace `NeighborSampler` with `NeighborLoader` in the distributed sampling example ([#6307](https://github.com/pyg-team/pytorch_geometric/pull/6307)) +- Fixed the filtering of node features in `transforms.RemoveIsolatedNodes` ([#6308](https://github.com/pyg-team/pytorch_geometric/pull/6308)) +- Fixed a bug in `DimeNet` that causes an output dimension mismatch ([#6305](https://github.com/pyg-team/pytorch_geometric/pull/6305)) +- Fixed `Data.to_heterogeneous()` with empty `edge_index` ([#6304](https://github.com/pyg-team/pytorch_geometric/pull/6304)) +- Unify `Explanation.node_mask` and `Explanation.node_feat_mask` ([#6267](https://github.com/pyg-team/pytorch_geometric/pull/6267)) +- Moved thresholding config of the `Explainer` to `Explanation` ([#6215](https://github.com/pyg-team/pytorch_geometric/pull/6215)) +- Fixed a bug in the output order in `HeteroLinear` for un-sorted type vectors ([#6198](https://github.com/pyg-team/pytorch_geometric/pull/6198)) +- Breaking Change: Move `ExplainerConfig` arguments to the `Explainer` class ([#6176](https://github.com/pyg-team/pytorch_geometric/pull/6176)) +- Refactored `NeighborSampler` to be input-type agnostic ([#6173](https://github.com/pyg-team/pytorch_geometric/pull/6173)) +- Infer correct CUDA device ID in `profileit` decorator ([#6164](https://github.com/pyg-team/pytorch_geometric/pull/6164)) +- Correctly use edge weights in `GDC` example ([#6159](https://github.com/pyg-team/pytorch_geometric/pull/6159)) +- Breaking Change: Moved PyTorch Lightning data modules to `torch_geometric.data.lightning` ([#6140](https://github.com/pyg-team/pytorch_geometric/pull/6140)) +- Make `torch_sparse` an optional dependency ([#6132](https://github.com/pyg-team/pytorch_geometric/pull/6132), [#6134](https://github.com/pyg-team/pytorch_geometric/pull/6134), [#6138](https://github.com/pyg-team/pytorch_geometric/pull/6138), [#6139](https://github.com/pyg-team/pytorch_geometric/pull/6139), [#7387](https://github.com/pyg-team/pytorch_geometric/pull/7387)) +- Optimized `utils.softmax` implementation ([#6113](https://github.com/pyg-team/pytorch_geometric/pull/6113), [#6155](https://github.com/pyg-team/pytorch_geometric/pull/6155), [#6805](https://github.com/pyg-team/pytorch_geometric/pull/6805)) +- Optimized `topk` implementation for large enough graphs ([#6123](https://github.com/pyg-team/pytorch_geometric/pull/6123)) + +### Removed + +- `torch-sparse` is now an optional dependency ([#6625](https://github.com/pyg-team/pytorch_geometric/pull/6625), [#6626](https://github.com/pyg-team/pytorch_geometric/pull/6626), [#6627](https://github.com/pyg-team/pytorch_geometric/pull/6627), [#6628](https://github.com/pyg-team/pytorch_geometric/pull/6628), [#6629](https://github.com/pyg-team/pytorch_geometric/pull/6629), [#6630](https://github.com/pyg-team/pytorch_geometric/pull/6630)) +- Removed most of the `torch-scatter` dependencies ([#6394](https://github.com/pyg-team/pytorch_geometric/pull/6394), [#6395](https://github.com/pyg-team/pytorch_geometric/pull/6395), [#6399](https://github.com/pyg-team/pytorch_geometric/pull/6399), [#6400](https://github.com/pyg-team/pytorch_geometric/pull/6400), [#6615](https://github.com/pyg-team/pytorch_geometric/pull/6615), [#6617](https://github.com/pyg-team/pytorch_geometric/pull/6617)) +- Removed the deprecated classes `GNNExplainer` and `Explainer` from `nn.models` ([#6382](https://github.com/pyg-team/pytorch_geometric/pull/6382))
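With the legacy `nn.models.GNNExplainer`/`Explainer` classes removed, a minimal sketch of the replacement `torch_geometric.explain` interface (the trained `model` and the `x`/`edge_index` tensors are assumed to exist; the config values are illustrative):

```python
from torch_geometric.explain import Explainer, GNNExplainer

explainer = Explainer(
    model=model,                        # any trained PyG model
    algorithm=GNNExplainer(epochs=200),
    explanation_type='model',
    node_mask_type='attributes',
    edge_mask_type='object',
    model_config=dict(
        mode='multiclass_classification',
        task_level='node',
        return_type='log_probs',
    ),
)
explanation = explainer(x, edge_index, index=10)  # explain the prediction for node 10
```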
+- Removed `target_index` argument in the `Explainer` interface ([#6270](https://github.com/pyg-team/pytorch_geometric/pull/6270)) +- Removed `Aggregation.set_validate_args` option ([#6175](https://github.com/pyg-team/pytorch_geometric/pull/6175)) + +## [2.2.0] - 2022-12-01 + +### Added + +- Extended `GNNExplainer` to support edge level explanations ([#6056](https://github.com/pyg-team/pytorch_geometric/pull/6056), [#6083](https://github.com/pyg-team/pytorch_geometric/pull/6083)) +- Added CPU affinitization for `NodeLoader` ([#6005](https://github.com/pyg-team/pytorch_geometric/pull/6005)) +- Added triplet sampling in `LinkNeighborLoader` ([#6004](https://github.com/pyg-team/pytorch_geometric/pull/6004)) +- Added `FusedAggregation` of simple scatter reductions ([#6036](https://github.com/pyg-team/pytorch_geometric/pull/6036)) +- Added a `to_smiles` function ([#6038](https://github.com/pyg-team/pytorch_geometric/pull/6038)) +- Added option to make normalization coefficients trainable in `PNAConv` ([#6039](https://github.com/pyg-team/pytorch_geometric/pull/6039)) +- Added `semi_grad` option in `VarAggregation` and `StdAggregation` ([#6042](https://github.com/pyg-team/pytorch_geometric/pull/6042)) +- Allow for fused aggregations in `MultiAggregation` ([#6036](https://github.com/pyg-team/pytorch_geometric/pull/6036), [#6040](https://github.com/pyg-team/pytorch_geometric/pull/6040)) +- Added `HeteroData` support for `to_captum_model` and added `to_captum_input` ([#5934](https://github.com/pyg-team/pytorch_geometric/pull/5934)) +- Added `HeteroData` support in `RandomNodeLoader` ([#6007](https://github.com/pyg-team/pytorch_geometric/pull/6007)) +- Added bipartite `GraphSAGE` example ([#5834](https://github.com/pyg-team/pytorch_geometric/pull/5834)) +- Added `LRGBDataset` to include 5 datasets from the [Long Range Graph Benchmark](https://openreview.net/pdf?id=in7XC5RcjEn) ([#5935](https://github.com/pyg-team/pytorch_geometric/pull/5935)) +- Added a warning for invalid node and edge type names in `HeteroData` ([#5990](https://github.com/pyg-team/pytorch_geometric/pull/5990)) +- Added PyTorch 1.13 support ([#5975](https://github.com/pyg-team/pytorch_geometric/pull/5975)) +- Added `int32` support in `NeighborLoader` ([#5948](https://github.com/pyg-team/pytorch_geometric/pull/5948)) +- Add `dgNN` support and `FusedGATConv` implementation ([#5140](https://github.com/pyg-team/pytorch_geometric/pull/5140)) +- Added `lr_scheduler_solver` and customized `lr_scheduler` classes ([#5942](https://github.com/pyg-team/pytorch_geometric/pull/5942)) +- Add `to_fixed_size` graph transformer ([#5939](https://github.com/pyg-team/pytorch_geometric/pull/5939)) +- Add support for symbolic tracing of `SchNet` model ([#5938](https://github.com/pyg-team/pytorch_geometric/pull/5938)) +- Add support for customizable interaction graph in `SchNet` model ([#5919](https://github.com/pyg-team/pytorch_geometric/pull/5919)) +- Started adding `torch.sparse` support to PyG ([#5906](https://github.com/pyg-team/pytorch_geometric/pull/5906), [#5944](https://github.com/pyg-team/pytorch_geometric/pull/5944), [#6003](https://github.com/pyg-team/pytorch_geometric/pull/6003), [#6633](https://github.com/pyg-team/pytorch_geometric/pull/6633)) +- Added `HydroNet` water cluster dataset ([#5537](https://github.com/pyg-team/pytorch_geometric/pull/5537), [#5902](https://github.com/pyg-team/pytorch_geometric/pull/5902), [#5903](https://github.com/pyg-team/pytorch_geometric/pull/5903)) +- Added explainability support for heterogeneous GNNs 
([#5886](https://github.com/pyg-team/pytorch_geometric/pull/5886)) +- Added `SparseTensor` support to `SuperGATConv` ([#5888](https://github.com/pyg-team/pytorch_geometric/pull/5888)) +- Added TorchScript support for `AttentiveFP` ([#5868](https://github.com/pyg-team/pytorch_geometric/pull/5868)) +- Added `num_steps` argument to training and inference benchmarks ([#5898](https://github.com/pyg-team/pytorch_geometric/pull/5898)) +- Added `torch.onnx.export` support ([#5877](https://github.com/pyg-team/pytorch_geometric/pull/5877), [#5997](https://github.com/pyg-team/pytorch_geometric/pull/5997)) +- Enable VTune ITT in inference and training benchmarks ([#5830](https://github.com/pyg-team/pytorch_geometric/pull/5830), [#5878](https://github.com/pyg-team/pytorch_geometric/pull/5878)) +- Add training benchmark ([#5774](https://github.com/pyg-team/pytorch_geometric/pull/5774)) +- Added a "Link Prediction on MovieLens" Colab notebook ([#5823](https://github.com/pyg-team/pytorch_geometric/pull/5823)) +- Added custom `sampler` support in `LightningDataModule` ([#5820](https://github.com/pyg-team/pytorch_geometric/pull/5820)) +- Added a `return_semantic_attention_weights` argument to `HANConv` ([#5787](https://github.com/pyg-team/pytorch_geometric/pull/5787)) +- Added `disjoint` argument to `NeighborLoader` and `LinkNeighborLoader` ([#5775](https://github.com/pyg-team/pytorch_geometric/pull/5775)) +- Added support for `input_time` in `NeighborLoader` ([#5763](https://github.com/pyg-team/pytorch_geometric/pull/5763)) +- Added `disjoint` mode for temporal `LinkNeighborLoader` ([#5717](https://github.com/pyg-team/pytorch_geometric/pull/5717)) +- Added `HeteroData` support for `transforms.Constant` ([#5700](https://github.com/pyg-team/pytorch_geometric/pull/5700)) +- Added `np.memmap` support in `NeighborLoader` ([#5696](https://github.com/pyg-team/pytorch_geometric/pull/5696)) +- Added `assortativity` that computes degree assortativity coefficient ([#5587](https://github.com/pyg-team/pytorch_geometric/pull/5587)) +- Added `SSGConv` layer ([#5599](https://github.com/pyg-team/pytorch_geometric/pull/5599)) +- Added `shuffle_node`, `mask_feature` and `add_random_edge` augmentation methods ([#5548](https://github.com/pyg-team/pytorch_geometric/pull/5548)) +- Added `dropout_path` augmentation that drops edges from a graph based on random walks ([#5531](https://github.com/pyg-team/pytorch_geometric/pull/5531)) +- Add support for filling labels with dummy values in `HeteroData.to_homogeneous()` ([#5540](https://github.com/pyg-team/pytorch_geometric/pull/5540)) +- Added `temporal_strategy` option to `neighbor_sample` ([#5576](https://github.com/pyg-team/pytorch_geometric/pull/5576)) +- Added `torch_geometric.sampler` package to docs ([#5563](https://github.com/pyg-team/pytorch_geometric/pull/5563)) +- Added the `DGraphFin` dynamic graph dataset ([#5504](https://github.com/pyg-team/pytorch_geometric/pull/5504)) +- Added `dropout_edge` augmentation that randomly drops edges from a graph - the usage of `dropout_adj` is now deprecated ([#5495](https://github.com/pyg-team/pytorch_geometric/pull/5495)) +- Added `dropout_node` augmentation that randomly drops nodes from a graph ([#5481](https://github.com/pyg-team/pytorch_geometric/pull/5481)) +- Added `AddRandomMetaPaths` that adds edges based on random walks along a metapath ([#5397](https://github.com/pyg-team/pytorch_geometric/pull/5397))
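A minimal sketch of the `dropout_edge`/`dropout_node` augmentations listed above (toy `edge_index`; both return boolean masks over the original edges or nodes):

```python
import torch
from torch_geometric.utils import dropout_edge, dropout_node

edge_index = torch.tensor([[0, 1, 1, 2],
                           [1, 0, 2, 1]])

# Drop each edge with probability 0.5 (replaces the deprecated `dropout_adj`).
edge_index_kept, edge_mask = dropout_edge(edge_index, p=0.5)

# Drop nodes with probability 0.5; edges incident to dropped nodes are removed.
edge_index_sub, edge_mask_sub, node_mask = dropout_node(edge_index, p=0.5)
```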
+- Added `WLConvContinuous` for performing WL refinement with continuous attributes ([#5316](https://github.com/pyg-team/pytorch_geometric/pull/5316)) +- Added `print_summary` method for the `torch_geometric.data.Dataset` interface ([#5438](https://github.com/pyg-team/pytorch_geometric/pull/5438)) +- Added `sampler` support to `LightningDataModule` ([#5456](https://github.com/pyg-team/pytorch_geometric/pull/5456), [#5457](https://github.com/pyg-team/pytorch_geometric/pull/5457)) +- Added official splits to `MalNetTiny` dataset ([#5078](https://github.com/pyg-team/pytorch_geometric/pull/5078)) +- Added `IndexToMask` and `MaskToIndex` transforms ([#5375](https://github.com/pyg-team/pytorch_geometric/pull/5375), [#5455](https://github.com/pyg-team/pytorch_geometric/pull/5455)) +- Added `FeaturePropagation` transform ([#5387](https://github.com/pyg-team/pytorch_geometric/pull/5387)) +- Added `PositionalEncoding` ([#5381](https://github.com/pyg-team/pytorch_geometric/pull/5381)) +- Consolidated sampler routines behind `torch_geometric.sampler`, enabling ease of extensibility in the future ([#5312](https://github.com/pyg-team/pytorch_geometric/pull/5312), [#5365](https://github.com/pyg-team/pytorch_geometric/pull/5365), [#5402](https://github.com/pyg-team/pytorch_geometric/pull/5402), [#5404](https://github.com/pyg-team/pytorch_geometric/pull/5404), [#5418](https://github.com/pyg-team/pytorch_geometric/pull/5418)) +- Added `pyg-lib` neighbor sampling ([#5384](https://github.com/pyg-team/pytorch_geometric/pull/5384), [#5388](https://github.com/pyg-team/pytorch_geometric/pull/5388)) +- Added `pyg_lib.segment_matmul` integration within `HeteroLinear` ([#5330](https://github.com/pyg-team/pytorch_geometric/pull/5330), [#5347](https://github.com/pyg-team/pytorch_geometric/pull/5347)) +- Enabled `bf16` support in benchmark scripts ([#5293](https://github.com/pyg-team/pytorch_geometric/pull/5293), [#5341](https://github.com/pyg-team/pytorch_geometric/pull/5341)) +- Added `Aggregation.set_validate_args` option to skip validation of `dim_size` ([#5290](https://github.com/pyg-team/pytorch_geometric/pull/5290)) +- Added `SparseTensor` support to inference and training benchmark suite ([#5242](https://github.com/pyg-team/pytorch_geometric/pull/5242), [#5258](https://github.com/pyg-team/pytorch_geometric/pull/5258), [#5881](https://github.com/pyg-team/pytorch_geometric/pull/5881)) +- Added experimental mode in inference benchmarks ([#5254](https://github.com/pyg-team/pytorch_geometric/pull/5254)) +- Added node classification example instrumented with [Weights and Biases (W&B) logging](https://wandb.com) and [W&B Sweeps](https://wandb.com/sweeps) ([#5192](https://github.com/pyg-team/pytorch_geometric/pull/5192)) +- Added experimental mode for `utils.scatter` ([#5232](https://github.com/pyg-team/pytorch_geometric/pull/5232), [#5241](https://github.com/pyg-team/pytorch_geometric/pull/5241), [#5386](https://github.com/pyg-team/pytorch_geometric/pull/5386)) +- Added missing test labels in `HGBDataset` ([#5233](https://github.com/pyg-team/pytorch_geometric/pull/5233)) +- Added `BaseStorage.get()` functionality ([#5240](https://github.com/pyg-team/pytorch_geometric/pull/5240)) +- Added a test to confirm that `to_hetero` works with `SparseTensor` ([#5222](https://github.com/pyg-team/pytorch_geometric/pull/5222)) +- Added `torch_geometric.explain` module with base functionality for explainability methods ([#5804](https://github.com/pyg-team/pytorch_geometric/pull/5804), [#6054](https://github.com/pyg-team/pytorch_geometric/pull/6054), 
[#6089](https://github.com/pyg-team/pytorch_geometric/pull/6089)) + +### Changed + +- Moved and adapted `GNNExplainer` from `torch_geometric.nn` to `torch_geometric.explain.algorithm` ([#5967](https://github.com/pyg-team/pytorch_geometric/pull/5967), [#6065](https://github.com/pyg-team/pytorch_geometric/pull/6065)) +- Optimized scatter implementations for CPU/GPU, both with and without backward computation ([#6051](https://github.com/pyg-team/pytorch_geometric/pull/6051), [#6052](https://github.com/pyg-team/pytorch_geometric/pull/6052)) +- Support temperature value in `dense_mincut_pool` ([#5908](https://github.com/pyg-team/pytorch_geometric/pull/5908)) +- Fixed a bug in which `VirtualNode` mistakenly treated node features as edge features ([#5819](https://github.com/pyg-team/pytorch_geometric/pull/5819)) +- Fixed `setter` and `getter` handling in `BaseStorage` ([#5815](https://github.com/pyg-team/pytorch_geometric/pull/5815)) +- Fixed `path` in `hetero_conv_dblp.py` example ([#5686](https://github.com/pyg-team/pytorch_geometric/pull/5686)) +- Fix `auto_select_device` routine in GraphGym for PyTorch Lightning>=1.7 ([#5677](https://github.com/pyg-team/pytorch_geometric/pull/5677)) +- Support `in_channels` with `tuple` in `GENConv` for bipartite message passing ([#5627](https://github.com/pyg-team/pytorch_geometric/pull/5627), [#5641](https://github.com/pyg-team/pytorch_geometric/pull/5641)) +- Handle cases of not having enough possible negative edges in `RandomLinkSplit` ([#5642](https://github.com/pyg-team/pytorch_geometric/pull/5642)) +- Fix `RGCN+pyg-lib` for `LongTensor` input ([#5610](https://github.com/pyg-team/pytorch_geometric/pull/5610)) +- Improved type hint support ([#5842](https://github.com/pyg-team/pytorch_geometric/pull/5842), [#5603](https://github.com/pyg-team/pytorch_geometric/pull/5603), [#5659](https://github.com/pyg-team/pytorch_geometric/pull/5659), [#5664](https://github.com/pyg-team/pytorch_geometric/pull/5664), [#5665](https://github.com/pyg-team/pytorch_geometric/pull/5665), [#5666](https://github.com/pyg-team/pytorch_geometric/pull/5666), [#5667](https://github.com/pyg-team/pytorch_geometric/pull/5667), [#5668](https://github.com/pyg-team/pytorch_geometric/pull/5668), [#5669](https://github.com/pyg-team/pytorch_geometric/pull/5669), [#5673](https://github.com/pyg-team/pytorch_geometric/pull/5673), [#5675](https://github.com/pyg-team/pytorch_geometric/pull/5675), [#5673](https://github.com/pyg-team/pytorch_geometric/pull/5676), [#5678](https://github.com/pyg-team/pytorch_geometric/pull/5678), [#5682](https://github.com/pyg-team/pytorch_geometric/pull/5682), [#5683](https://github.com/pyg-team/pytorch_geometric/pull/5683), [#5684](https://github.com/pyg-team/pytorch_geometric/pull/5684), [#5685](https://github.com/pyg-team/pytorch_geometric/pull/5685), [#5687](https://github.com/pyg-team/pytorch_geometric/pull/5687), [#5688](https://github.com/pyg-team/pytorch_geometric/pull/5688), [#5695](https://github.com/pyg-team/pytorch_geometric/pull/5695), [#5699](https://github.com/pyg-team/pytorch_geometric/pull/5699), [#5701](https://github.com/pyg-team/pytorch_geometric/pull/5701), [#5702](https://github.com/pyg-team/pytorch_geometric/pull/5702), [#5703](https://github.com/pyg-team/pytorch_geometric/pull/5703), [#5706](https://github.com/pyg-team/pytorch_geometric/pull/5706), [#5707](https://github.com/pyg-team/pytorch_geometric/pull/5707), [#5710](https://github.com/pyg-team/pytorch_geometric/pull/5710), [#5714](https://github.com/pyg-team/pytorch_geometric/pull/5714), 
[#5715](https://github.com/pyg-team/pytorch_geometric/pull/5715), [#5716](https://github.com/pyg-team/pytorch_geometric/pull/5716), [#5722](https://github.com/pyg-team/pytorch_geometric/pull/5722), [#5724](https://github.com/pyg-team/pytorch_geometric/pull/5724), [#5725](https://github.com/pyg-team/pytorch_geometric/pull/5725), [#5726](https://github.com/pyg-team/pytorch_geometric/pull/5726), [#5729](https://github.com/pyg-team/pytorch_geometric/pull/5729), [#5730](https://github.com/pyg-team/pytorch_geometric/pull/5730), [#5731](https://github.com/pyg-team/pytorch_geometric/pull/5731), [#5732](https://github.com/pyg-team/pytorch_geometric/pull/5732), [#5733](https://github.com/pyg-team/pytorch_geometric/pull/5733), [#5743](https://github.com/pyg-team/pytorch_geometric/pull/5743), [#5734](https://github.com/pyg-team/pytorch_geometric/pull/5734), [#5735](https://github.com/pyg-team/pytorch_geometric/pull/5735), [#5736](https://github.com/pyg-team/pytorch_geometric/pull/5736), [#5737](https://github.com/pyg-team/pytorch_geometric/pull/5737), [#5738](https://github.com/pyg-team/pytorch_geometric/pull/5738), [#5747](https://github.com/pyg-team/pytorch_geometric/pull/5747), [#5752](https://github.com/pyg-team/pytorch_geometric/pull/5752), [#5753](https://github.com/pyg-team/pytorch_geometric/pull/5753), [#5754](https://github.com/pyg-team/pytorch_geometric/pull/5754), [#5756](https://github.com/pyg-team/pytorch_geometric/pull/5756), [#5757](https://github.com/pyg-team/pytorch_geometric/pull/5757), [#5758](https://github.com/pyg-team/pytorch_geometric/pull/5758), [#5760](https://github.com/pyg-team/pytorch_geometric/pull/5760), [#5766](https://github.com/pyg-team/pytorch_geometric/pull/5766), [#5767](https://github.com/pyg-team/pytorch_geometric/pull/5767), [#5768](https://github.com/pyg-team/pytorch_geometric/pull/5768)), [#5781](https://github.com/pyg-team/pytorch_geometric/pull/5781), [#5778](https://github.com/pyg-team/pytorch_geometric/pull/5778), [#5797](https://github.com/pyg-team/pytorch_geometric/pull/5797), [#5798](https://github.com/pyg-team/pytorch_geometric/pull/5798), [#5799](https://github.com/pyg-team/pytorch_geometric/pull/5799), [#5800](https://github.com/pyg-team/pytorch_geometric/pull/5800), [#5806](https://github.com/pyg-team/pytorch_geometric/pull/5806), [#5810](https://github.com/pyg-team/pytorch_geometric/pull/5810), [#5811](https://github.com/pyg-team/pytorch_geometric/pull/5811), [#5828](https://github.com/pyg-team/pytorch_geometric/pull/5828), [#5847](https://github.com/pyg-team/pytorch_geometric/pull/5847), [#5851](https://github.com/pyg-team/pytorch_geometric/pull/5851), [#5852](https://github.com/pyg-team/pytorch_geometric/pull/5852)) +- Avoid modifying `mode_kwargs` in `MultiAggregation` ([#5601](https://github.com/pyg-team/pytorch_geometric/pull/5601)) +- Changed `BatchNorm` to allow for batches of size one during training ([#5530](https://github.com/pyg-team/pytorch_geometric/pull/5530), [#5614](https://github.com/pyg-team/pytorch_geometric/pull/5614)) +- Integrated better temporal sampling support by requiring that local neighborhoods are sorted according to time ([#5516](https://github.com/pyg-team/pytorch_geometric/issues/5516), [#5602](https://github.com/pyg-team/pytorch_geometric/issues/5602)) +- Fixed a bug when applying several scalers with `PNAConv` ([#5514](https://github.com/pyg-team/pytorch_geometric/issues/5514)) +- Allow `.` in `ParameterDict` key names ([#5494](https://github.com/pyg-team/pytorch_geometric/pull/5494)) +- Renamed 
`drop_unconnected_nodes` to `drop_unconnected_node_types` and `drop_orig_edges` to `drop_orig_edge_types` in `AddMetapaths` ([#5490](https://github.com/pyg-team/pytorch_geometric/pull/5490)) +- Improved `utils.scatter` performance by explicitly choosing a better implementation for `add` and `mean` reduction ([#5399](https://github.com/pyg-team/pytorch_geometric/pull/5399)) +- Fix `to_dense_adj` with empty `edge_index` ([#5476](https://github.com/pyg-team/pytorch_geometric/pull/5476)) +- The `AttentionalAggregation` module can now be applied to compute attention on a per-feature level ([#5449](https://github.com/pyg-team/pytorch_geometric/pull/5449)) +- Ensure equal lengths of `num_neighbors` across edge types in `NeighborLoader` ([#5444](https://github.com/pyg-team/pytorch_geometric/pull/5444)) +- Fixed a bug in `TUDataset` in which node features were wrongly constructed whenever `node_attributes` only holds a single feature (_e.g._, in `PROTEINS`) ([#5441](https://github.com/pyg-team/pytorch_geometric/pull/5441)) +- Breaking change: removed `num_neighbors` as an attribute of loader ([#5404](https://github.com/pyg-team/pytorch_geometric/pull/5404)) +- `ASAPooling` is now jittable ([#5395](https://github.com/pyg-team/pytorch_geometric/pull/5395)) +- Updated unsupervised `GraphSAGE` example to leverage `LinkNeighborLoader` ([#5317](https://github.com/pyg-team/pytorch_geometric/pull/5317)) +- Replace in-place operations with out-of-place ones to align with `torch.scatter_reduce` API ([#5353](https://github.com/pyg-team/pytorch_geometric/pull/5353)) +- Breaking bugfix: `PointTransformerConv` now correctly uses `sum` aggregation ([#5332](https://github.com/pyg-team/pytorch_geometric/pull/5332)) +- Improve out-of-bounds error message in `MessagePassing` ([#5339](https://github.com/pyg-team/pytorch_geometric/pull/5339)) +- Allow file names of a `Dataset` to be specified as either a property or a method ([#5338](https://github.com/pyg-team/pytorch_geometric/pull/5338)) +- Fixed separating a list of `SparseTensor` within `InMemoryDataset` ([#5299](https://github.com/pyg-team/pytorch_geometric/pull/5299)) +- Improved name resolving of normalization layers ([#5277](https://github.com/pyg-team/pytorch_geometric/pull/5277)) +- Fail gracefully on `GLIBC` errors within `torch-spline-conv` ([#5276](https://github.com/pyg-team/pytorch_geometric/pull/5276)) +- Fixed `Dataset.num_classes` in case a `transform` modifies `data.y` ([#5274](https://github.com/pyg-team/pytorch_geometric/pull/5274)) +- Allow customization of the activation function within `PNAConv` ([#5262](https://github.com/pyg-team/pytorch_geometric/pull/5262)) +- Do not fill `InMemoryDataset` cache on `dataset.num_features` ([#5264](https://github.com/pyg-team/pytorch_geometric/pull/5264)) +- Changed tests relying on `dblp` datasets to instead use synthetic data ([#5250](https://github.com/pyg-team/pytorch_geometric/pull/5250)) +- Fixed a bug for the initialization of activation function examples in `custom_graphgym` ([#5243](https://github.com/pyg-team/pytorch_geometric/pull/5243)) +- Allow any integer tensors when checking edge_index input to message passing ([#5281](https://github.com/pyg-team/pytorch_geometric/pull/5281)) + +### Removed + +- Removed `scatter_reduce` option from experimental mode ([#5399](https://github.com/pyg-team/pytorch_geometric/pull/5399)) + +## [2.1.0] - 2022-08-17 + +### Added + +- Added the test for `DeepGCNLayer` ([#5704](https://github.com/pyg-team/pytorch_geometric/pull/5704)) +- Allow `.` in `ModuleDict` key names 
([#5227](https://github.com/pyg-team/pytorch_geometric/pull/5227)) +- Added `edge_label_time` argument to `LinkNeighborLoader` ([#5137](https://github.com/pyg-team/pytorch_geometric/pull/5137), [#5173](https://github.com/pyg-team/pytorch_geometric/pull/5173)) +- Let `ImbalancedSampler` accept `torch.Tensor` as input ([#5138](https://github.com/pyg-team/pytorch_geometric/pull/5138)) +- Added `flow` argument to `gcn_norm` to correctly normalize the adjacency matrix in `GCNConv` ([#5149](https://github.com/pyg-team/pytorch_geometric/pull/5149)) +- `NeighborSampler` supports graphs without edges ([#5072](https://github.com/pyg-team/pytorch_geometric/pull/5072)) +- Added the `MeanSubtractionNorm` layer ([#5068](https://github.com/pyg-team/pytorch_geometric/pull/5068)) +- Added `pyg_lib.segment_matmul` integration within `RGCNConv` ([#5052](https://github.com/pyg-team/pytorch_geometric/pull/5052), [#5096](https://github.com/pyg-team/pytorch_geometric/pull/5096)) +- Support `SparseTensor` as edge label in `LightGCN` ([#5046](https://github.com/pyg-team/pytorch_geometric/issues/5046)) +- Added support for `BasicGNN` models within `to_hetero` ([#5091](https://github.com/pyg-team/pytorch_geometric/pull/5091)) +- Added support for computing weighted metapaths in `AddMetapaths` ([#5049](https://github.com/pyg-team/pytorch_geometric/pull/5049)) +- Added inference benchmark suite ([#4915](https://github.com/pyg-team/pytorch_geometric/pull/4915)) +- Added a dynamically sized batch sampler for filling a mini-batch with a variable number of samples up to a maximum size ([#4972](https://github.com/pyg-team/pytorch_geometric/pull/4972)) +- Added fine-grained options for setting `bias` and `dropout` per layer in the `MLP` model ([#4981](https://github.com/pyg-team/pytorch_geometric/pull/4981)) +- Added `EdgeCNN` model ([#4991](https://github.com/pyg-team/pytorch_geometric/pull/4991)) +- Added scalable `inference` mode in `BasicGNN` with layer-wise neighbor loading ([#4977](https://github.com/pyg-team/pytorch_geometric/pull/4977)) +- Added inference benchmarks ([#4892](https://github.com/pyg-team/pytorch_geometric/pull/4892), [#5107](https://github.com/pyg-team/pytorch_geometric/pull/5107)) +- Added PyTorch 1.12 support ([#4975](https://github.com/pyg-team/pytorch_geometric/pull/4975)) +- Added `unbatch_edge_index` functionality for splitting an `edge_index` tensor according to a `batch` vector ([#4903](https://github.com/pyg-team/pytorch_geometric/pull/4903)) +- Added node-wise normalization mode in `LayerNorm` ([#4944](https://github.com/pyg-team/pytorch_geometric/pull/4944)) +- Added support for `normalization_resolver` ([#4926](https://github.com/pyg-team/pytorch_geometric/pull/4926), [#4951](https://github.com/pyg-team/pytorch_geometric/pull/4951), [#4958](https://github.com/pyg-team/pytorch_geometric/pull/4958), [#4959](https://github.com/pyg-team/pytorch_geometric/pull/4959)) +- Added notebook tutorial for `torch_geometric.nn.aggr` package to documentation ([#4927](https://github.com/pyg-team/pytorch_geometric/pull/4927)) +- Added support for `follow_batch` for lists or dictionaries of tensors ([#4837](https://github.com/pyg-team/pytorch_geometric/pull/4837)) +- Added `Data.validate()` and `HeteroData.validate()` functionality ([#4885](https://github.com/pyg-team/pytorch_geometric/pull/4885)) +- Added `LinkNeighborLoader` support to `LightningDataModule` ([#4868](https://github.com/pyg-team/pytorch_geometric/pull/4868)) +- Added `predict()` support to the `LightningNodeData` module 
([#4884](https://github.com/pyg-team/pytorch_geometric/pull/4884)) +- Added `time_attr` argument to `LinkNeighborLoader` ([#4877](https://github.com/pyg-team/pytorch_geometric/pull/4877), [#4908](https://github.com/pyg-team/pytorch_geometric/pull/4908)) +- Added a `filter_per_worker` argument to data loaders to allow filtering of data within sub-processes ([#4873](https://github.com/pyg-team/pytorch_geometric/pull/4873)) +- Added a `NeighborLoader` benchmark script ([#4815](https://github.com/pyg-team/pytorch_geometric/pull/4815), [#4862](https://github.com/pyg-team/pytorch_geometric/pull/4862/files)) +- Added support for `FeatureStore` and `GraphStore` in `NeighborLoader` ([#4817](https://github.com/pyg-team/pytorch_geometric/pull/4817), [#4851](https://github.com/pyg-team/pytorch_geometric/pull/4851), [#4854](https://github.com/pyg-team/pytorch_geometric/pull/4854), [#4856](https://github.com/pyg-team/pytorch_geometric/pull/4856), [#4857](https://github.com/pyg-team/pytorch_geometric/pull/4857), [#4882](https://github.com/pyg-team/pytorch_geometric/pull/4882), [#4883](https://github.com/pyg-team/pytorch_geometric/pull/4883), [#4929](https://github.com/pyg-team/pytorch_geometric/pull/4929), [#4992](https://github.com/pyg-team/pytorch_geometric/pull/4922), [#4962](https://github.com/pyg-team/pytorch_geometric/pull/4962), [#4968](https://github.com/pyg-team/pytorch_geometric/pull/4968), [#5037](https://github.com/pyg-team/pytorch_geometric/pull/5037), [#5088](https://github.com/pyg-team/pytorch_geometric/pull/5088), [#5270](https://github.com/pyg-team/pytorch_geometric/pull/5270), [#5307](https://github.com/pyg-team/pytorch_geometric/pull/5307), [#5318](https://github.com/pyg-team/pytorch_geometric/pull/5318)) +- Added a `normalize` parameter to `dense_diff_pool` ([#4847](https://github.com/pyg-team/pytorch_geometric/pull/4847)) +- Added `size=None` explanation to jittable `MessagePassing` modules in the documentation ([#4850](https://github.com/pyg-team/pytorch_geometric/pull/4850)) +- Added documentation to the `DataLoaderIterator` class ([#4838](https://github.com/pyg-team/pytorch_geometric/pull/4838)) +- Added `GraphStore` support to `Data` and `HeteroData` ([#4816](https://github.com/pyg-team/pytorch_geometric/pull/4816)) +- Added `FeatureStore` support to `Data` and `HeteroData` ([#4807](https://github.com/pyg-team/pytorch_geometric/pull/4807), [#4853](https://github.com/pyg-team/pytorch_geometric/pull/4853)) +- Added `FeatureStore` and `GraphStore` abstractions ([#4534](https://github.com/pyg-team/pytorch_geometric/pull/4534), [#4568](https://github.com/pyg-team/pytorch_geometric/pull/4568), [#5120](https://github.com/pyg-team/pytorch_geometric/pull/5120)) +- Added support for dense aggregations in `global_*_pool` ([#4827](https://github.com/pyg-team/pytorch_geometric/pull/4827)) +- Added Python version requirement ([#4825](https://github.com/pyg-team/pytorch_geometric/pull/4825)) +- Added TorchScript support to `JumpingKnowledge` module ([#4805](https://github.com/pyg-team/pytorch_geometric/pull/4805)) +- Added a `max_sample` argument to `AddMetaPaths` in order to tackle very dense metapath edges ([#4750](https://github.com/pyg-team/pytorch_geometric/pull/4750)) +- Test `HANConv` with empty tensors ([#4756](https://github.com/pyg-team/pytorch_geometric/pull/4756), [#4841](https://github.com/pyg-team/pytorch_geometric/pull/4841)) +- Added the `bias` vector to the `GCN` model definition in the "Create Message Passing Networks" tutorial 
([#4755](https://github.com/pyg-team/pytorch_geometric/pull/4755)) +- Added `transforms.RootedSubgraph` interface with two implementations: `RootedEgoNets` and `RootedRWSubgraph` ([#3926](https://github.com/pyg-team/pytorch_geometric/pull/3926)) +- Added `ptr` vectors for `follow_batch` attributes within `Batch.from_data_list` ([#4723](https://github.com/pyg-team/pytorch_geometric/pull/4723)) +- Added `torch_geometric.nn.aggr` package ([#4687](https://github.com/pyg-team/pytorch_geometric/pull/4687), [#4721](https://github.com/pyg-team/pytorch_geometric/pull/4721), [#4731](https://github.com/pyg-team/pytorch_geometric/pull/4731), [#4762](https://github.com/pyg-team/pytorch_geometric/pull/4762), [#4749](https://github.com/pyg-team/pytorch_geometric/pull/4749), [#4779](https://github.com/pyg-team/pytorch_geometric/pull/4779), [#4863](https://github.com/pyg-team/pytorch_geometric/pull/4863), [#4864](https://github.com/pyg-team/pytorch_geometric/pull/4864), [#4865](https://github.com/pyg-team/pytorch_geometric/pull/4865), [#4866](https://github.com/pyg-team/pytorch_geometric/pull/4866), [#4872](https://github.com/pyg-team/pytorch_geometric/pull/4872), [#4934](https://github.com/pyg-team/pytorch_geometric/pull/4934), [#4935](https://github.com/pyg-team/pytorch_geometric/pull/4935), [#4957](https://github.com/pyg-team/pytorch_geometric/pull/4957), [#4973](https://github.com/pyg-team/pytorch_geometric/pull/4973), [#4986](https://github.com/pyg-team/pytorch_geometric/pull/4986), [#4995](https://github.com/pyg-team/pytorch_geometric/pull/4995), [#5000](https://github.com/pyg-team/pytorch_geometric/pull/5000), [#5034](https://github.com/pyg-team/pytorch_geometric/pull/5034), [#5036](https://github.com/pyg-team/pytorch_geometric/pull/5036), [#5039](https://github.com/pyg-team/pytorch_geometric/issues/5039), [#4522](https://github.com/pyg-team/pytorch_geometric/pull/4522), [#5033](https://github.com/pyg-team/pytorch_geometric/pull/5033), [#5085](https://github.com/pyg-team/pytorch_geometric/pull/5085), [#5097](https://github.com/pyg-team/pytorch_geometric/pull/5097), [#5099](https://github.com/pyg-team/pytorch_geometric/pull/5099), [#5104](https://github.com/pyg-team/pytorch_geometric/pull/5104), [#5113](https://github.com/pyg-team/pytorch_geometric/pull/5113), [#5130](https://github.com/pyg-team/pytorch_geometric/pull/5130), [#5098](https://github.com/pyg-team/pytorch_geometric/pull/5098), [#5191](https://github.com/pyg-team/pytorch_geometric/pull/5191)) +- Added the `DimeNet++` model ([#4432](https://github.com/pyg-team/pytorch_geometric/pull/4432), [#4699](https://github.com/pyg-team/pytorch_geometric/pull/4699), [#4700](https://github.com/pyg-team/pytorch_geometric/pull/4700), [#4800](https://github.com/pyg-team/pytorch_geometric/pull/4800)) +- Added an example of using PyG with PyTorch Ignite ([#4487](https://github.com/pyg-team/pytorch_geometric/pull/4487)) +- Added `GroupAddRev` module with support for reducing training GPU memory ([#4671](https://github.com/pyg-team/pytorch_geometric/pull/4671), [#4701](https://github.com/pyg-team/pytorch_geometric/pull/4701), [#4715](https://github.com/pyg-team/pytorch_geometric/pull/4715), [#4730](https://github.com/pyg-team/pytorch_geometric/pull/4730)) +- Added benchmarks via [`wandb`](https://wandb.ai/site) ([#4656](https://github.com/pyg-team/pytorch_geometric/pull/4656), [#4672](https://github.com/pyg-team/pytorch_geometric/pull/4672), [#4676](https://github.com/pyg-team/pytorch_geometric/pull/4676))
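A minimal sketch of the shared calling convention of the `torch_geometric.nn.aggr` package introduced above (toy tensors; `index` assigns each row of `x` to an output slot):

```python
import torch
from torch_geometric.nn.aggr import MeanAggregation, SoftmaxAggregation

x = torch.randn(6, 16)                     # six node feature vectors
index = torch.tensor([0, 0, 1, 1, 1, 2])   # cluster/graph assignment per node

out = MeanAggregation()(x, index, dim_size=3)               # shape [3, 16]
out = SoftmaxAggregation(learn=True)(x, index, dim_size=3)  # learnable variant
```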
[#4676](https://github.com/pyg-team/pytorch_geometric/pull/4676)) +- Added `unbatch` functionality ([#4628](https://github.com/pyg-team/pytorch_geometric/pull/4628)) +- Confirm that `to_hetero()` works with custom functions, _e.g._, `dropout_adj` ([#4653](https://github.com/pyg-team/pytorch_geometric/pull/4653)) +- Added the `MLP.plain_last=False` option ([#4652](https://github.com/pyg-team/pytorch_geometric/pull/4652)) +- Added a check in `HeteroConv` and `to_hetero()` to ensure that `MessagePassing.add_self_loops` is disabled ([#4647](https://github.com/pyg-team/pytorch_geometric/pull/4647)) +- Added `HeteroData.subgraph()`, `HeteroData.node_type_subgraph()` and `HeteroData.edge_type_subgraph()` support ([#4635](https://github.com/pyg-team/pytorch_geometric/pull/4635)) +- Added the `AQSOL` dataset ([#4626](https://github.com/pyg-team/pytorch_geometric/pull/4626)) +- Added `HeteroData.node_items()` and `HeteroData.edge_items()` functionality ([#4644](https://github.com/pyg-team/pytorch_geometric/pull/4644)) +- Added PyTorch Lightning support in GraphGym ([#4511](https://github.com/pyg-team/pytorch_geometric/pull/4511), [#4516](https://github.com/pyg-team/pytorch_geometric/pull/4516), [#4531](https://github.com/pyg-team/pytorch_geometric/pull/4531), [#4689](https://github.com/pyg-team/pytorch_geometric/pull/4689), [#4843](https://github.com/pyg-team/pytorch_geometric/pull/4843)) +- Added support for returning embeddings in `MLP` models ([#4625](https://github.com/pyg-team/pytorch_geometric/pull/4625)) +- Added faster initialization of `NeighborLoader` in case edge indices are already sorted (via `is_sorted=True`) ([#4620](https://github.com/pyg-team/pytorch_geometric/pull/4620), [#4702](https://github.com/pyg-team/pytorch_geometric/pull/4702)) +- Added `AddPositionalEncoding` transform ([#4521](https://github.com/pyg-team/pytorch_geometric/pull/4521)) +- Added `HeteroData.is_undirected()` support ([#4604](https://github.com/pyg-team/pytorch_geometric/pull/4604)) +- Added the `Genius` and `Wiki` datasets to `torch_geometric.datasets.LINKXDataset` ([#4570](https://github.com/pyg-team/pytorch_geometric/pull/4570), [#4600](https://github.com/pyg-team/pytorch_geometric/pull/4600)) +- Added `nn.aggr.EquilibriumAggregation` implicit global layer ([#4522](https://github.com/pyg-team/pytorch_geometric/pull/4522)) +- Added support for graph-level outputs in `to_hetero` ([#4582](https://github.com/pyg-team/pytorch_geometric/pull/4582)) +- Added `CHANGELOG.md` ([#4581](https://github.com/pyg-team/pytorch_geometric/pull/4581)) +- Added `HeteroData` support to the `RemoveIsolatedNodes` transform ([#4479](https://github.com/pyg-team/pytorch_geometric/pull/4479)) +- Added `HeteroData.num_features` functionality ([#4504](https://github.com/pyg-team/pytorch_geometric/pull/4504)) +- Added support for projecting features before propagation in `SAGEConv` ([#4437](https://github.com/pyg-team/pytorch_geometric/pull/4437)) +- Added `Geom-GCN` splits to the `Planetoid` datasets ([#4442](https://github.com/pyg-team/pytorch_geometric/pull/4442)) +- Added a `LinkNeighborLoader` for training scalable link prediction models ([#4396](https://github.com/pyg-team/pytorch_geometric/pull/4396), [#4439](https://github.com/pyg-team/pytorch_geometric/pull/4439), [#4441](https://github.com/pyg-team/pytorch_geometric/pull/4441), [#4446](https://github.com/pyg-team/pytorch_geometric/pull/4446), [#4508](https://github.com/pyg-team/pytorch_geometric/pull/4508), [#4509](https://github.com/pyg-team/pytorch_geometric/pull/4509)) +- Added an
unsupervised `GraphSAGE` example on `PPI` ([#4416](https://github.com/pyg-team/pytorch_geometric/pull/4416)) +- Added support for `LSTM` aggregation in `SAGEConv` ([#4379](https://github.com/pyg-team/pytorch_geometric/pull/4379)) +- Added support for floating-point labels in `RandomLinkSplit` ([#4311](https://github.com/pyg-team/pytorch_geometric/pull/4311), [#4383](https://github.com/pyg-team/pytorch_geometric/pull/4383)) +- Added support for `torch.data` `DataPipes` ([#4302](https://github.com/pyg-team/pytorch_geometric/pull/4302), [#4345](https://github.com/pyg-team/pytorch_geometric/pull/4345), [#4349](https://github.com/pyg-team/pytorch_geometric/pull/4349)) +- Added support for the `cosine` argument in the `KNNGraph`/`RadiusGraph` transforms ([#4344](https://github.com/pyg-team/pytorch_geometric/pull/4344)) +- Added support for graph-level attributes in `networkx` conversion ([#4343](https://github.com/pyg-team/pytorch_geometric/pull/4343)) +- Added support for renaming node types via `HeteroData.rename` ([#4329](https://github.com/pyg-team/pytorch_geometric/pull/4329)) +- Added an example to load a trained PyG model in C++ ([#4307](https://github.com/pyg-team/pytorch_geometric/pull/4307)) +- Added a `MessagePassing.explain_message` method to customize how explanations for messages are generated ([#4278](https://github.com/pyg-team/pytorch_geometric/pull/4278), [#4448](https://github.com/pyg-team/pytorch_geometric/pull/4448)) +- Added support for `GATv2Conv` in the `nn.models.GAT` model ([#4357](https://github.com/pyg-team/pytorch_geometric/pull/4357)) +- Added `HeteroData.subgraph` functionality ([#4243](https://github.com/pyg-team/pytorch_geometric/pull/4243)) +- Added the `MaskLabel` module and a corresponding masked label propagation example ([#4197](https://github.com/pyg-team/pytorch_geometric/pull/4197)) +- Added temporal sampling support to `NeighborLoader` ([#4025](https://github.com/pyg-team/pytorch_geometric/pull/4025)) +- Added an example for unsupervised heterogeneous graph learning based on "Deep Multiplex Graph Infomax" ([#3189](https://github.com/pyg-team/pytorch_geometric/pull/3189)) + +### Changed + +- Changed docstring for `RandomLinkSplit` ([#5190](https://github.com/pyg-team/pytorch_geometric/issues/5190)) +- Switched to PyTorch `scatter_reduce` implementation - experimental feature ([#5120](https://github.com/pyg-team/pytorch_geometric/pull/5120)) +- Fixed `RGATConv` device mismatches for `f-scaled` mode ([#5187](https://github.com/pyg-team/pytorch_geometric/pull/5187)) +- Allow for multi-dimensional `edge_labels` in `LinkNeighborLoader` ([#5186](https://github.com/pyg-team/pytorch_geometric/pull/5186)) +- Fixed `GINEConv` bug with non-sequential input ([#5154](https://github.com/pyg-team/pytorch_geometric/pull/5154)) +- Improved error message ([#5095](https://github.com/pyg-team/pytorch_geometric/pull/5095)) +- Fixed `HGTLoader` bug which produced outputs with missing edge types ([#5067](https://github.com/pyg-team/pytorch_geometric/pull/5067)) +- Fixed dynamic inheritance issue in data batching ([#5051](https://github.com/pyg-team/pytorch_geometric/pull/5051)) +- Fixed `load_state_dict` in `Linear` with `strict=False` mode ([#5094](https://github.com/pyg-team/pytorch_geometric/pull/5094)) +- Fixed typo in `MaskLabel.ratio_mask` ([#5093](https://github.com/pyg-team/pytorch_geometric/pull/5093)) +- Fixed `data.num_node_features` computation for sparse matrices ([#5089](https://github.com/pyg-team/pytorch_geometric/pull/5089)) +- Fixed `torch.fx` bug with `torch_geometric.nn.aggr` package
([#5021](https://github.com/pyg-team/pytorch_geometric/pull/5021)) +- Fixed `GenConv` test ([#4993](https://github.com/pyg-team/pytorch_geometric/pull/4993)) +- Fixed packaging tests for Python 3.10 ([#4982](https://github.com/pyg-team/pytorch_geometric/pull/4982)) +- Changed `act_dict` (part of `graphgym`) to create individual instances instead of reusing the same ones everywhere ([#4978](https://github.com/pyg-team/pytorch_geometric/pull/4978)) +- Fixed issue where one-hot tensors were passed to `F.one_hot` ([#4970](https://github.com/pyg-team/pytorch_geometric/pull/4970)) +- Fixed `bool` arguments in `argparse` in `benchmark/` ([#4967](https://github.com/pyg-team/pytorch_geometric/pull/4967)) +- Fixed `BasicGNN` for `num_layers=1`, which now respects a desired number of `out_channels` ([#4943](https://github.com/pyg-team/pytorch_geometric/pull/4943)) +- `len(batch)` will now return the number of graphs inside the batch, not the number of attributes ([#4931](https://github.com/pyg-team/pytorch_geometric/pull/4931)) +- Fixed `data.subgraph` generation for 0-dim tensors ([#4932](https://github.com/pyg-team/pytorch_geometric/pull/4932)) +- Removed unnecessary inclusion of self-loops when sampling negative edges ([#4880](https://github.com/pyg-team/pytorch_geometric/pull/4880)) +- Fixed `InMemoryDataset` inferring wrong `len` for lists of tensors ([#4837](https://github.com/pyg-team/pytorch_geometric/pull/4837)) +- Fixed `Batch.separate` when using it for lists of tensors ([#4837](https://github.com/pyg-team/pytorch_geometric/pull/4837)) +- Corrected docstring for `SAGEConv` ([#4852](https://github.com/pyg-team/pytorch_geometric/pull/4852)) +- Fixed a bug in `TUDataset` where `pre_filter` was not applied whenever `pre_transform` was present +- Renamed `RandomTranslate` to `RandomJitter` - the usage of `RandomTranslate` is now deprecated ([#4828](https://github.com/pyg-team/pytorch_geometric/pull/4828)) +- Do not allow accessing edge types in `HeteroData` with two node types when there exist multiple relations between these types ([#4782](https://github.com/pyg-team/pytorch_geometric/pull/4782)) +- Allow `edge_type == rev_edge_type` argument in `RandomLinkSplit` ([#4757](https://github.com/pyg-team/pytorch_geometric/pull/4757), [#5221](https://github.com/pyg-team/pytorch_geometric/pull/5221)) +- Fixed a numerical instability in the `GeneralConv` and `neighbor_sample` tests ([#4754](https://github.com/pyg-team/pytorch_geometric/pull/4754)) +- Fixed a bug in `HANConv` in which destination node features rather than source node features were propagated ([#4753](https://github.com/pyg-team/pytorch_geometric/pull/4753)) +- Fixed versions of `checkout` and `setup-python` in CI ([#4751](https://github.com/pyg-team/pytorch_geometric/pull/4751)) +- Fixed `protobuf` version ([#4719](https://github.com/pyg-team/pytorch_geometric/pull/4719)) +- Fixed the ranking protocol bug in the RGCN link prediction example ([#4688](https://github.com/pyg-team/pytorch_geometric/pull/4688)) +- Math support in Markdown ([#4683](https://github.com/pyg-team/pytorch_geometric/pull/4683)) +- Allow for `setter` properties in `Data` ([#4682](https://github.com/pyg-team/pytorch_geometric/pull/4682), [#4686](https://github.com/pyg-team/pytorch_geometric/pull/4686)) +- Allow for optional `edge_weight` in `GCN2Conv` ([#4670](https://github.com/pyg-team/pytorch_geometric/pull/4670)) +- Fixed the interplay between `TUDataset` and `pre_transform` that modifies node features ([#4669](https://github.com/pyg-team/pytorch_geometric/pull/4669)) +-
Make use of the `pyg_sphinx_theme` documentation template ([#4664](https://github.com/pyg-team/pytorch_geometric/pull/4664), [#4667](https://github.com/pyg-team/pytorch_geometric/pull/4667)) +- Refactored reading molecular positions from SDF files for QM9 datasets ([#4654](https://github.com/pyg-team/pytorch_geometric/pull/4654)) +- Fixed `MLP.jittable()` bug in case `return_emb=True` ([#4645](https://github.com/pyg-team/pytorch_geometric/pull/4645), [#4648](https://github.com/pyg-team/pytorch_geometric/pull/4648)) +- The generated node features of `StochasticBlockModelDataset` are now ordered with respect to their labels ([#4617](https://github.com/pyg-team/pytorch_geometric/pull/4617)) +- Fixed typos in the documentation ([#4616](https://github.com/pyg-team/pytorch_geometric/pull/4616), [#4824](https://github.com/pyg-team/pytorch_geometric/pull/4824), [#4895](https://github.com/pyg-team/pytorch_geometric/pull/4895), [#5161](https://github.com/pyg-team/pytorch_geometric/pull/5161)) +- The `bias` argument in `TAGConv` is now actually applied ([#4597](https://github.com/pyg-team/pytorch_geometric/pull/4597)) +- Fixed subclass behavior of `process` and `download` in `Dataset` ([#4586](https://github.com/pyg-team/pytorch_geometric/pull/4586)) +- Fixed filtering of attributes for loaders in case `__cat_dim__ != 0` ([#4629](https://github.com/pyg-team/pytorch_geometric/pull/4629)) +- Fixed `SparseTensor` support in `NeighborLoader` ([#4320](https://github.com/pyg-team/pytorch_geometric/pull/4320)) +- Fixed average degree handling in `PNAConv` ([#4312](https://github.com/pyg-team/pytorch_geometric/pull/4312)) +- Fixed a bug in `from_networkx` in case some attributes are PyTorch tensors ([#4486](https://github.com/pyg-team/pytorch_geometric/pull/4486)) +- Added a missing clamp in `DimeNet` ([#4506](https://github.com/pyg-team/pytorch_geometric/pull/4506), [#4562](https://github.com/pyg-team/pytorch_geometric/pull/4562)) +- Fixed the download link in `DBP15K` ([#4428](https://github.com/pyg-team/pytorch_geometric/pull/4428)) +- Fixed an autograd bug in `DimeNet` when resetting parameters ([#4424](https://github.com/pyg-team/pytorch_geometric/pull/4424)) +- Fixed bipartite message passing in case `flow="target_to_source"` ([#4418](https://github.com/pyg-team/pytorch_geometric/pull/4418)) +- Fixed a bug in which `num_nodes` was not properly updated in the `FixedPoints` transform ([#4394](https://github.com/pyg-team/pytorch_geometric/pull/4394)) +- PyTorch Lightning >= 1.6 support ([#4377](https://github.com/pyg-team/pytorch_geometric/pull/4377)) +- Fixed a bug in which `GATConv` was not jittable ([#4347](https://github.com/pyg-team/pytorch_geometric/pull/4347)) +- Fixed a bug in which the GraphGym config was not stored in each specific experiment directory ([#4338](https://github.com/pyg-team/pytorch_geometric/pull/4338)) +- Fixed a bug in which `nn.models.GAT` did not produce `out_channels`-many output channels ([#4299](https://github.com/pyg-team/pytorch_geometric/pull/4299)) +- Fixed mini-batching with empty lists as attributes ([#4293](https://github.com/pyg-team/pytorch_geometric/pull/4293)) +- Fixed a bug in which `GCNConv` could not be combined with `to_hetero` on heterogeneous graphs with one node type ([#4279](https://github.com/pyg-team/pytorch_geometric/pull/4279)) + +### Removed + +- Removed internal metrics in favor of `torchmetrics` ([#4287](https://github.com/pyg-team/pytorch_geometric/pull/4287)) diff --git a/pytorch_geometric-2.3.1/CITATION.cff b/pytorch_geometric-2.4.0/CITATION.cff similarity index 100%
rename from pytorch_geometric-2.3.1/CITATION.cff rename to pytorch_geometric-2.4.0/CITATION.cff diff --git a/pytorch_geometric-2.3.1/LICENSE b/pytorch_geometric-2.4.0/LICENSE similarity index 100% rename from pytorch_geometric-2.3.1/LICENSE rename to pytorch_geometric-2.4.0/LICENSE diff --git a/pytorch_geometric-2.4.0/README.md b/pytorch_geometric-2.4.0/README.md new file mode 100644 index 0000000..e1190ca --- /dev/null +++ b/pytorch_geometric-2.4.0/README.md @@ -0,0 +1,471 @@ +[pypi-image]: https://badge.fury.io/py/torch-geometric.svg +[pypi-url]: https://pypi.python.org/pypi/torch-geometric +[testing-image]: https://github.com/pyg-team/pytorch_geometric/actions/workflows/testing.yml/badge.svg +[testing-url]: https://github.com/pyg-team/pytorch_geometric/actions/workflows/testing.yml +[linting-image]: https://github.com/pyg-team/pytorch_geometric/actions/workflows/linting.yml/badge.svg +[linting-url]: https://github.com/pyg-team/pytorch_geometric/actions/workflows/linting.yml +[docs-image]: https://readthedocs.org/projects/pytorch-geometric/badge/?version=latest +[docs-url]: https://pytorch-geometric.readthedocs.io/en/latest +[coverage-image]: https://codecov.io/gh/pyg-team/pytorch_geometric/branch/master/graph/badge.svg +[coverage-url]: https://codecov.io/github/pyg-team/pytorch_geometric?branch=master +[contributing-image]: https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat +[contributing-url]: https://github.com/pyg-team/pytorch_geometric/blob/master/.github/CONTRIBUTING.md +[slack-image]: https://img.shields.io/badge/slack-pyg-brightgreen +[slack-url]: https://data.pyg.org/slack.html + +

+ +

+ +-------------------------------------------------------------------------------- + +[![PyPI Version][pypi-image]][pypi-url] +[![Testing Status][testing-image]][testing-url] +[![Linting Status][linting-image]][linting-url] +[![Docs Status][docs-image]][docs-url] +[![Contributing][contributing-image]][contributing-url] +[![Slack][slack-image]][slack-url] + +**[Documentation](https://pytorch-geometric.readthedocs.io)** | **[Paper](https://arxiv.org/abs/1903.02428)** | **[Colab Notebooks and Video Tutorials](https://pytorch-geometric.readthedocs.io/en/latest/get_started/colabs.html)** | **[External Resources](https://pytorch-geometric.readthedocs.io/en/latest/external/resources.html)** | **[OGB Examples](https://github.com/snap-stanford/ogb/tree/master/examples)** + +**PyG** *(PyTorch Geometric)* is a library built upon [PyTorch](https://pytorch.org/) to easily write and train Graph Neural Networks (GNNs) for a wide range of applications related to structured data. + +It consists of various methods for deep learning on graphs and other irregular structures, also known as *[geometric deep learning](http://geometricdeeplearning.com/)*, from a variety of published papers. +In addition, it consists of easy-to-use mini-batch loaders for operating on many small and single giant graphs, [multi GPU-support](https://github.com/pyg-team/pytorch_geometric/tree/master/examples/multi_gpu), [`torch.compile`](https://pytorch-geometric.readthedocs.io/en/latest/advanced/compile.html) support, [`DataPipe`](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/datapipe.py) support, a large number of common benchmark datasets (based on simple interfaces to create your own), the [GraphGym](https://pytorch-geometric.readthedocs.io/en/latest/advanced/graphgym.html) experiment manager, and helpful transforms, both for learning on arbitrary graphs as well as on 3D meshes or point clouds. + +**[Click here to join our Slack community!][slack-url]** + +

+ +

+ +-------------------------------------------------------------------------------- + +* [Library Highlights](#library-highlights) +* [Quick Tour for New Users](#quick-tour-for-new-users) +* [Architecture Overview](#architecture-overview) +* [Implemented GNN Models](#implemented-gnn-models) +* [Installation](#installation) + +## Library Highlights + +Whether you are a machine learning researcher or first-time user of machine learning toolkits, here are some reasons to try out PyG for machine learning on graph-structured data. + +* **Easy-to-use and unified API**: + All it takes is 10-20 lines of code to get started with training a GNN model (see the next section for a [quick tour](#quick-tour-for-new-users)). + PyG is *PyTorch-on-the-rocks*: It utilizes a tensor-centric API and keeps design principles close to vanilla PyTorch. + If you are already familiar with PyTorch, utilizing PyG is straightforward. +* **Comprehensive and well-maintained GNN models**: + Most of the state-of-the-art Graph Neural Network architectures have been implemented by library developers or authors of research papers and are ready to be applied. +* **Great flexibility**: + Existing PyG models can easily be extended for conducting your own research with GNNs. + Making modifications to existing models or creating new architectures is simple, thanks to its easy-to-use message passing API, and a variety of operators and utility functions. +* **Large-scale real-world GNN models**: + We focus on the needs of GNN applications in challenging real-world scenarios, and support learning on diverse types of graphs, including but not limited to: scalable GNNs for graphs with millions of nodes; dynamic GNNs for node predictions over time; heterogeneous GNNs with multiple node types and edge types. +* **GraphGym integration**: GraphGym lets users easily reproduce GNN experiments, is able to launch and analyze thousands of different GNN configurations, and is customizable by registering new modules to a GNN learning pipeline. + +## Quick Tour for New Users + +In this quick tour, we highlight the ease of creating and training a GNN model with only a few lines of code. + +### Train your own GNN model + +In the first glimpse of PyG, we implement the training of a GNN for classifying papers in a citation graph. +For this, we load the [Cora](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.datasets.Planetoid.html) dataset, and create a simple 2-layer GCN model using the pre-defined [`GCNConv`](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.GCNConv.html): + +```python +import torch +from torch import Tensor +from torch_geometric.nn import GCNConv +from torch_geometric.datasets import Planetoid + +dataset = Planetoid(root='.', name='Cora') + +class GCN(torch.nn.Module): + def __init__(self, in_channels, hidden_channels, out_channels): + super().__init__() + self.conv1 = GCNConv(in_channels, hidden_channels) + self.conv2 = GCNConv(hidden_channels, out_channels) + + def forward(self, x: Tensor, edge_index: Tensor) -> Tensor: + # x: Node feature matrix of shape [num_nodes, in_channels] + # edge_index: Graph connectivity matrix of shape [2, num_edges] + x = self.conv1(x, edge_index).relu() + x = self.conv2(x, edge_index) + return x + +model = GCN(dataset.num_features, 16, dataset.num_classes) +``` + +
+We can now optimize the model in a training loop, similar to the standard PyTorch training procedure. + +```python +import torch.nn.functional as F + +data = dataset[0] +optimizer = torch.optim.Adam(model.parameters(), lr=0.01) + +for epoch in range(200): + pred = model(data.x, data.edge_index) + loss = F.cross_entropy(pred[data.train_mask], data.y[data.train_mask]) + + # Backpropagation + optimizer.zero_grad() + loss.backward() + optimizer.step() +``` +
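After training, a quick sanity check might look like the following minimal sketch, which continues the snippet above (it only computes test-set accuracy and is not the full evaluation protocol of the linked example):

```python
model.eval()
with torch.no_grad():
    # The class with the highest score is the prediction for each node.
    pred = model(data.x, data.edge_index).argmax(dim=-1)

# Accuracy over the held-out test nodes only.
acc = (pred[data.test_mask] == data.y[data.test_mask]).float().mean().item()
print(f'Test accuracy: {acc:.4f}')
```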
+ +More information about evaluating final model performance can be found in the corresponding [example](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/gcn.py). + +### Create your own GNN layer + +In addition to the easy application of existing GNNs, PyG makes it simple to implement custom Graph Neural Networks (see [here](https://pytorch-geometric.readthedocs.io/en/latest/tutorial/create_gnn.html) for the accompanying tutorial). +For example, this is all it takes to implement the [edge convolutional layer](https://arxiv.org/abs/1801.07829) from Wang *et al.*: + +$$x_i^{\prime} ~ = ~ \max_{j \in \mathcal{N}(i)} ~ \textrm{MLP}_{\theta} \left( [ ~ x_i, ~ x_j - x_i ~ ] \right)$$ + +```python +import torch +from torch import Tensor +from torch.nn import Sequential, Linear, ReLU +from torch_geometric.nn import MessagePassing + +class EdgeConv(MessagePassing): + def __init__(self, in_channels, out_channels): + super().__init__(aggr="max") # "Max" aggregation. + self.mlp = Sequential( + Linear(2 * in_channels, out_channels), + ReLU(), + Linear(out_channels, out_channels), + ) + + def forward(self, x: Tensor, edge_index: Tensor) -> Tensor: + # x: Node feature matrix of shape [num_nodes, in_channels] + # edge_index: Graph connectivity matrix of shape [2, num_edges] + return self.propagate(edge_index, x=x) # shape [num_nodes, out_channels] + + def message(self, x_j: Tensor, x_i: Tensor) -> Tensor: + # x_j: Source node features of shape [num_edges, in_channels] + # x_i: Target node features of shape [num_edges, in_channels] + edge_features = torch.cat([x_i, x_j - x_i], dim=-1) + return self.mlp(edge_features) # shape [num_edges, out_channels] +``` + +### Manage experiments with GraphGym + +GraphGym allows you to manage and launch GNN experiments, using a highly modularized pipeline (see [here](https://pytorch-geometric.readthedocs.io/en/latest/advanced/graphgym.html) for the accompanying tutorial). + +``` +git clone https://github.com/pyg-team/pytorch_geometric.git +cd pytorch_geometric/graphgym +bash run_single.sh # run a single GNN experiment (node/edge/graph-level) +bash run_batch.sh # run a batch of GNN experiments, using different GNN designs/datasets/tasks +``` + +Users are highly encouraged to check out the [documentation](https://pytorch-geometric.readthedocs.io/en/latest), which contains additional tutorials on the essential functionalities of PyG, including data handling, creation of datasets and a full list of implemented methods, transforms, and datasets. +For a quick start, check out our [examples](https://github.com/pyg-team/pytorch_geometric/tree/master/examples) in `examples/`. + +## Architecture Overview + +PyG provides a multi-layer framework that enables users to build Graph Neural Network solutions on both low and high levels. +It comprises the following components: + +* The PyG **engine** utilizes the powerful PyTorch deep learning framework with full [`torch.compile`](https://pytorch-geometric.readthedocs.io/en/latest/advanced/compile.html) and [TorchScript](https://pytorch-geometric.readthedocs.io/en/latest/advanced/jit.html) support, as well as additions of efficient CPU/CUDA libraries for operating on sparse data, *e.g.*, [`pyg-lib`](https://github.com/pyg-team/pyg-lib). +* The PyG **storage** handles data processing, transformation and loading pipelines. It is capable of handling and processing large-scale graph datasets, and provides effective solutions for heterogeneous graphs.
It further provides a variety of sampling solutions, which enable training of GNNs on large-scale graphs (see the sketch after this list). +* The PyG **operators** bundle essential functionalities for implementing Graph Neural Networks. PyG supports important GNN building blocks that can be combined and applied to various parts of a GNN model, ensuring rich flexibility of GNN design. +* Finally, PyG provides an abundant set of GNN **models**, and examples that showcase GNN models on standard graph benchmarks. Thanks to its flexibility, users can easily build and modify custom GNN models to fit their specific needs. + +
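As a small, hedged illustration of the sampling solutions mentioned in the storage bullet above, a `NeighborLoader` sketch (the dataset choice and all hyperparameters here are arbitrary placeholders):

```python
from torch_geometric.datasets import Planetoid
from torch_geometric.loader import NeighborLoader

data = Planetoid(root='.', name='Cora')[0]

# Sample at most 10 neighbors per node in the first hop and 5 in the
# second, yielding small subgraphs around batches of training nodes.
loader = NeighborLoader(
    data,
    num_neighbors=[10, 5],
    batch_size=128,
    input_nodes=data.train_mask,
)

batch = next(iter(loader))  # a `Data` object holding one sampled subgraph
```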

+ +

+ +## Implemented GNN Models + +We list currently supported PyG models, layers and operators according to category: + +**GNN layers:** +All Graph Neural Network layers are implemented via the **[`nn.MessagePassing`](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.MessagePassing.html)** interface. +A GNN layer specifies how to perform message passing, *i.e.* by designing different message, aggregation and update functions as defined [here](https://pytorch-geometric.readthedocs.io/en/latest/tutorial/create_gnn.html). +These GNN layers can be stacked together to create Graph Neural Network models (see the sketch after the layer list below). + +* **[GCNConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.GCNConv.html)** from Kipf and Welling: [Semi-Supervised Classification with Graph Convolutional Networks](https://arxiv.org/abs/1609.02907) (ICLR 2017) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/gcn.py)] +* **[ChebConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.ChebConv.html)** from Defferrard *et al.*: [Convolutional Neural Networks on Graphs with Fast Localized Spectral Filtering](https://arxiv.org/abs/1606.09375) (NIPS 2016) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/gcn.py#L36-L37)] +* **[GATConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.GATConv.html)** from Veličković *et al.*: [Graph Attention Networks](https://arxiv.org/abs/1710.10903) (ICLR 2018) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/gat.py)] + +
+Expand to see all implemented GNN layers... + +* **[GCN2Conv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.GCN2Conv.html)** from Chen *et al.*: [Simple and Deep Graph Convolutional Networks](https://arxiv.org/abs/2007.02133) (ICML 2020) [[**Example1**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/gcn2_cora.py), [**Example2**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/gcn2_ppi.py)] +* **[SplineConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.SplineConv.html)** from Fey *et al.*: [SplineCNN: Fast Geometric Deep Learning with Continuous B-Spline Kernels](https://arxiv.org/abs/1711.08920) (CVPR 2018) [[**Example1**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/cora.py), [**Example2**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/faust.py)] +* **[NNConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.NNConv.html)** from Gilmer *et al.*: [Neural Message Passing for Quantum Chemistry](https://arxiv.org/abs/1704.01212) (ICML 2017) [[**Example1**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/qm9_nn_conv.py), [**Example2**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/mnist_nn_conv.py)] +* **[CGConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.CGConv.html)** from Xie and Grossman: [Crystal Graph Convolutional Neural Networks for an Accurate and Interpretable Prediction of Material Properties](https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.120.145301) (Physical Review Letters 120, 2018) +* **[ECConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.ECConv.html)** from Simonovsky and Komodakis: [Edge-Conditioned Convolution on Graphs](https://arxiv.org/abs/1704.02901) (CVPR 2017) +* **[EGConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.EGConv.html)** from Tailor *et al.*: [Adaptive Filters and Aggregator Fusion for Efficient Graph Convolutions](https://arxiv.org/abs/2104.01481) (GNNSys 2021) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/egc.py)] +* **[GATv2Conv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.GATv2Conv.html)** from Brody *et al.*: [How Attentive are Graph Attention Networks?](https://arxiv.org/abs/2105.14491) (ICLR 2022) +* **[TransformerConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.TransformerConv.html)** from Shi *et al.*: [Masked Label Prediction: Unified Message Passing Model for Semi-Supervised Classification](https://arxiv.org/abs/2009.03509) (CoRR 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/unimp_arxiv.py)] +* **[SAGEConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.SAGEConv.html)** from Hamilton *et al.*: [Inductive Representation Learning on Large Graphs](https://arxiv.org/abs/1706.02216) (NIPS 2017) [[**Example1**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/reddit.py), [**Example2**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/ogbn_products_sage.py), [**Example3**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/graph_sage_unsup.py), 
[**Example4**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/graph_sage_unsup_ppi.py)] +* **[GraphConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.GraphConv.html)** from, *e.g.*, Morris *et al.*: [Weisfeiler and Leman Go Neural: Higher-order Graph Neural Networks](https://arxiv.org/abs/1810.02244) (AAAI 2019) +* **[GatedGraphConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.GatedGraphConv.html)** from Li *et al.*: [Gated Graph Sequence Neural Networks](https://arxiv.org/abs/1511.05493) (ICLR 2016) +* **[ResGatedGraphConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.ResGatedGraphConv.html)** from Bresson and Laurent: [Residual Gated Graph ConvNets](https://arxiv.org/abs/1711.07553) (CoRR 2017) +* **[GINConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.GINConv.html)** from Xu *et al.*: [How Powerful are Graph Neural Networks?](https://arxiv.org/abs/1810.00826) (ICLR 2019) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/mutag_gin.py)] +* **[GINEConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.GINEConv.html)** from Hu *et al.*: [Strategies for Pre-training Graph Neural Networks](https://arxiv.org/abs/1905.12265) (ICLR 2020) +* **[ARMAConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.ARMAConv.html)** from Bianchi *et al.*: [Graph Neural Networks with Convolutional ARMA Filters](https://arxiv.org/abs/1901.01343) (CoRR 2019) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/arma.py)] +* **[SGConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.SGConv.html)** from Wu *et al.*: [Simplifying Graph Convolutional Networks](https://arxiv.org/abs/1902.07153) (CoRR 2019) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/sgc.py)] +* **[APPNP](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.APPNP.html)** from Klicpera *et al.*: [Predict then Propagate: Graph Neural Networks meet Personalized PageRank](https://arxiv.org/abs/1810.05997) (ICLR 2019) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/benchmark/citation/appnp.py)] +* **[MFConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.MFConv.html)** from Duvenaud *et al.*: [Convolutional Networks on Graphs for Learning Molecular Fingerprints](https://arxiv.org/abs/1509.09292) (NIPS 2015) +* **[AGNNConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.AGNNConv.html)** from Thekumparampil *et al.*: [Attention-based Graph Neural Network for Semi-Supervised Learning](https://arxiv.org/abs/1803.03735) (CoRR 2017) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/agnn.py)] +* **[TAGConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.TAGConv.html)** from Du *et al.*: [Topology Adaptive Graph Convolutional Networks](https://arxiv.org/abs/1710.10370) (CoRR 2017) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/tagcn.py)] +* **[PNAConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.PNAConv.html)** from Corso *et al.*: [Principal Neighbourhood Aggregation for Graph 
Nets](https://arxiv.org/abs/2004.05718) (CoRR 2020) [**[Example](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/pna.py)**] +* **[FAConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.FAConv.html)** from Bo *et al.*: [Beyond Low-Frequency Information in Graph Convolutional Networks](https://arxiv.org/abs/2101.00797) (AAAI 2021) +* **[PDNConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.PDNConv.html)** from Rozemberczki *et al.*: [Pathfinder Discovery Networks for Neural Message Passing](https://arxiv.org/abs/2010.12878) (WWW 2021) +* **[RGCNConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.RGCNConv.html)** from Schlichtkrull *et al.*: [Modeling Relational Data with Graph Convolutional Networks](https://arxiv.org/abs/1703.06103) (ESWC 2018) [[**Example1**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/rgcn.py), [**Example2**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/rgcn_link_pred.py)] +* **[RGATConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.RGATConv.html)** from Busbridge *et al.*: [Relational Graph Attention Networks](https://arxiv.org/abs/1904.05811) (CoRR 2019) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/rgat.py)] +* **[FiLMConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.FiLMConv.html)** from Brockschmidt: [GNN-FiLM: Graph Neural Networks with Feature-wise Linear Modulation](https://arxiv.org/abs/1906.12192) (ICML 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/film.py)] +* **[SignedConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.SignedConv.html)** from Derr *et al.*: [Signed Graph Convolutional Network](https://arxiv.org/abs/1808.06354) (ICDM 2018) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/signed_gcn.py)] +* **[DNAConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.DNAConv.html)** from Fey: [Just Jump: Dynamic Neighborhood Aggregation in Graph Neural Networks](https://arxiv.org/abs/1904.04849) (ICLR-W 2019) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/dna.py)] +* **[PANConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.PANConv.html)** from Ma *et al.*: [Path Integral Based Convolution and Pooling for Graph Neural Networks](https://arxiv.org/abs/2006.16811) (NeurIPS 2020) +* **[PointNetConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.PointNetConv.html)** (including **[Iterative Farthest Point Sampling](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.pool.fps.html)**, dynamic graph generation based on **[nearest neighbor](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.pool.knn_graph.html)** or **[maximum distance](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.pool.radius_graph.html)**, and **[k-NN interpolation](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.unpool.knn_interpolate.html)** for upsampling) from Qi *et al.*: [PointNet: Deep Learning on Point Sets for 3D Classification and Segmentation](https://arxiv.org/abs/1612.00593) (CVPR 2017) and [PointNet++: Deep
Hierarchical Feature Learning on Point Sets in a Metric Space](https://arxiv.org/abs/1706.02413) (NIPS 2017) [[**Example1**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/pointnet2_classification.py), [**Example2**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/pointnet2_segmentation.py)] +* **[EdgeConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.EdgeConv.html)** from Wang *et al.*: [Dynamic Graph CNN for Learning on Point Clouds](https://arxiv.org/abs/1801.07829) (CoRR, 2018) [[**Example1**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/dgcnn_classification.py), [**Example2**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/dgcnn_segmentation.py)] +* **[XConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.XConv.html)** from Li *et al.*: [PointCNN: Convolution On X-Transformed Points](https://arxiv.org/abs/1801.07791) (NeurIPS 2018) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/benchmark/points/point_cnn.py)] +* **[PPFConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.PPFConv.html)** from Deng *et al.*: [PPFNet: Global Context Aware Local Features for Robust 3D Point Matching](https://arxiv.org/abs/1802.02669) (CVPR 2018) +* **[GMMConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.GMMConv.html)** from Monti *et al.*: [Geometric Deep Learning on Graphs and Manifolds using Mixture Model CNNs](https://arxiv.org/abs/1611.08402) (CVPR 2017) +* **[FeaStConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.FeaStConv.html)** from Verma *et al.*: [FeaStNet: Feature-Steered Graph Convolutions for 3D Shape Analysis](https://arxiv.org/abs/1706.05206) (CVPR 2018) +* **[PointTransformerConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.PointTransformerConv.html)** from Zhao *et al.*: [Point Transformer](https://arxiv.org/abs/2012.09164) (2020) +* **[HypergraphConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.HypergraphConv.html)** from Bai *et al.*: [Hypergraph Convolution and Hypergraph Attention](https://arxiv.org/abs/1901.08150) (CoRR 2019) +* **[GravNetConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.GravNetConv.html)** from Qasim *et al.*: [Learning Representations of Irregular Particle-detector Geometry with Distance-weighted Graph Networks](https://arxiv.org/abs/1902.07987) (European Physics Journal C, 2019) +* **[SuperGAT](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.SuperGATConv.html)** from Kim and Oh: [How To Find Your Friendly Neighborhood: Graph Attention Design With Self-Supervision](https://openreview.net/forum?id=Wi5KUNlqWty) (ICLR 2021) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/super_gat.py)] +* **[HGTConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.HGTConv.html)** from Hu *et al.*: [Heterogeneous Graph Transformer](https://arxiv.org/abs/2003.01332) (WWW 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/hetero/hgt_dblp.py)] +* **[HEATConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.HEATConv.html)** from Mo *et al.*: [Heterogeneous Edge-Enhanced Graph Attention Network
For Multi-Agent Trajectory Prediction](https://arxiv.org/abs/2106.07161) (CoRR 2021) +* **[SSGConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.SSGConv.html)** from Zhu *et al.*: [Simple Spectral Graph Convolution](https://openreview.net/forum?id=CYO5T-YjWZV) (ICLR 2021) +* **[FusedGATConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.FusedGATConv.html)** from Zhang *et al.*: [Understanding GNN Computational Graph: A Coordinated Computation, IO, and Memory Perspective](https://proceedings.mlsys.org/paper/2022/file/9a1158154dfa42caddbd0694a4e9bdc8-Paper.pdf) (MLSys 2022) +* **[GPSConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.GPSConv.html)** from Rampášek *et al.*: [Recipe for a General, Powerful, Scalable Graph Transformer](https://arxiv.org/abs/2205.12454) (NeurIPS 2022) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/graph_gps.py)] +
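To make the stacking point above concrete: most of the convolutional layers listed here share the same `conv(x, edge_index)` call signature, so they can usually be swapped in and out of a model with a one-line change. A minimal sketch (layer choices and sizes below are arbitrary):

```python
import torch
from torch_geometric.nn import GATConv, SAGEConv

x = torch.randn(4, 16)                     # 4 nodes with 16 features each
edge_index = torch.tensor([[0, 1, 2, 3],   # source nodes
                           [1, 0, 3, 2]])  # target nodes

out = GATConv(16, 32, heads=1)(x, edge_index)  # -> [4, 32]
out = SAGEConv(16, 32)(x, edge_index)          # -> [4, 32]
```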
+ +**Pooling layers:** +Graph pooling layers combine the vectorial representations of a set of nodes in a graph (or a subgraph) into a single vector representation that summarizes the properties of its nodes. +It is commonly applied to graph-level tasks, which require combining node features into a single graph representation (see the sketch after the pooling list below). + +* **[Top-K Pooling](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.pool.TopKPooling.html)** from Gao and Ji: [Graph U-Nets](https://arxiv.org/abs/1905.05178) (ICML 2019), Cangea *et al.*: [Towards Sparse Hierarchical Graph Classifiers](https://arxiv.org/abs/1811.01287) (NeurIPS-W 2018) and Knyazev *et al.*: [Understanding Attention and Generalization in Graph Neural Networks](https://arxiv.org/abs/1905.02850) (ICLR-W 2019) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/proteins_topk_pool.py)] +* **[DiffPool](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.dense.dense_diff_pool.html)** from Ying *et al.*: [Hierarchical Graph Representation Learning with Differentiable Pooling](https://arxiv.org/abs/1806.08804) (NeurIPS 2018) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/proteins_diff_pool.py)] + +
+Expand to see all implemented pooling layers... + +* **[Attentional Aggregation](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.aggr.AttentionalAggregation.html)** from Li *et al.*: [Graph Matching Networks for Learning the Similarity of Graph Structured Objects](https://arxiv.org/abs/1904.12787) (ICML 2019) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/benchmark/kernel/global_attention.py)] +* **[Set2Set](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.aggr.Set2Set.html)** from Vinyals *et al.*: [Order Matters: Sequence to Sequence for Sets](https://arxiv.org/abs/1511.06391) (ICLR 2016) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/benchmark/kernel/set2set.py)] +* **[Sort Aggregation](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.aggr.SortAggregation.html)** from Zhang *et al.*: [An End-to-End Deep Learning Architecture for Graph Classification](https://www.cse.wustl.edu/~muhan/papers/AAAI_2018_DGCNN.pdf) (AAAI 2018) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/benchmark/kernel/sort_pool.py)] +* **[MinCut Pooling](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.dense.dense_mincut_pool.html)** from Bianchi *et al.*: [Spectral Clustering with Graph Neural Networks for Graph Pooling](https://arxiv.org/abs/1907.00481) (ICML 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/proteins_mincut_pool.py)] +* **[DMoN Pooling](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.dense.DMoNPooling.html)** from Tsitsulin *et al.*: [Graph Clustering with Graph Neural Networks](https://arxiv.org/abs/2006.16904) (CoRR 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/proteins_dmon_pool.py)] +* **[Graclus Pooling](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.pool.graclus.html)** from Dhillon *et al.*: [Weighted Graph Cuts without Eigenvectors: A Multilevel Approach](http://www.cs.utexas.edu/users/inderjit/public_papers/multilevel_pami.pdf) (PAMI 2007) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/mnist_graclus.py)] +* **[Voxel Grid Pooling](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.pool.voxel_grid.html)** from, *e.g.*, Simonovsky and Komodakis: [Dynamic Edge-Conditioned Filters in Convolutional Neural Networks on Graphs](https://arxiv.org/abs/1704.02901) (CVPR 2017) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/mnist_voxel_grid.py)] +* **[SAG Pooling](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.pool.SAGPooling.html)** from Lee *et al.*: [Self-Attention Graph Pooling](https://arxiv.org/abs/1904.08082) (ICML 2019) and Knyazev *et al.*: [Understanding Attention and Generalization in Graph Neural Networks](https://arxiv.org/abs/1905.02850) (ICLR-W 2019) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/benchmark/kernel/sag_pool.py)] +* **[Edge Pooling](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.pool.EdgePooling.html)** from Diehl *et al.*: [Towards Graph Pooling by Edge Contraction](https://graphreason.github.io/papers/17.pdf) (ICML-W 2019) and Diehl: [Edge Contraction Pooling for Graph Neural Networks](https://arxiv.org/abs/1905.10990) (CoRR 2019) 
[[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/benchmark/kernel/edge_pool.py)] +* **[ASAPooling](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.pool.ASAPooling.html)** from Ranjan *et al.*: [ASAP: Adaptive Structure Aware Pooling for Learning Hierarchical Graph Representations](https://arxiv.org/abs/1911.07979) (AAAI 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/benchmark/kernel/asap.py)] +* **[PANPooling](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.pool.PANPooling.html)** from Ma *et al.*: [Path Integral Based Convolution and Pooling for Graph Neural Networks](https://arxiv.org/abs/2006.16811) (NeurIPS 2020) +* **[MemPooling](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.pool.MemPooling.html)** from Khasahmadi *et al.*: [Memory-Based Graph Networks](https://arxiv.org/abs/2002.09518) (ICLR 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/mem_pool.py)] +* **[Graph Multiset Transformer](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.aggr.GraphMultisetTransformer.html)** from Baek *et al.*: [Accurate Learning of Graph Representations with Graph Multiset Pooling](https://arxiv.org/abs/2102.11533) (ICLR 2021) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/proteins_gmt.py)] +* **[Equilibrium Aggregation](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.aggr.EquilibriumAggregation.html)** from Bartunov *et al.*: [Equilibrium Aggregation: Encoding Sets via Optimization](https://arxiv.org/abs/2202.12795) (UAI 2022) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/equilibrium_median.py)]
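As a minimal sketch of the batch-wise readout semantics shared by these pooling operators, using the simple (non-learned) `global_mean_pool` readout; all values below are arbitrary:

```python
import torch
from torch_geometric.nn import global_mean_pool

x = torch.randn(5, 8)                  # node features of a 2-graph mini-batch
batch = torch.tensor([0, 0, 0, 1, 1])  # which graph each node belongs to

out = global_mean_pool(x, batch)       # -> [2, 8]: one summary vector per graph
```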
+ +**GNN models:** +Our supported GNN models incorporate multiple message passing layers, and users can directly use these pre-defined models to make predictions on graphs (see the sketch after the model list below). +Unlike simple stacking of GNN layers, these models could involve pre-processing, additional learnable parameters, skip connections, graph coarsening, etc. + +* **[SchNet](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.SchNet.html)** from Schütt *et al.*: [SchNet: A Continuous-filter Convolutional Neural Network for Modeling Quantum Interactions](https://arxiv.org/abs/1706.08566) (NIPS 2017) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/qm9_pretrained_schnet.py)] +* **[DimeNet](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.DimeNet.html)** and **[DimeNetPlusPlus](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.DimeNetPlusPlus.html)** from Klicpera *et al.*: [Directional Message Passing for Molecular Graphs](https://arxiv.org/abs/2003.03123) (ICLR 2020) and [Fast and Uncertainty-Aware Directional Message Passing for Non-Equilibrium Molecules](https://arxiv.org/abs/2011.14115) (NeurIPS-W 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/qm9_pretrained_dimenet.py)] +* **[Node2Vec](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.Node2Vec.html)** from Grover and Leskovec: [node2vec: Scalable Feature Learning for Networks](https://arxiv.org/abs/1607.00653) (KDD 2016) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/node2vec.py)] +* **[Deep Graph Infomax](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.DeepGraphInfomax.html)** from Veličković *et al.*: [Deep Graph Infomax](https://arxiv.org/abs/1809.10341) (ICLR 2019) [[**Example1**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/infomax_transductive.py), [**Example2**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/infomax_inductive.py)] +* **Deep Multiplex Graph Infomax** from Park *et al.*: [Unsupervised Attributed Multiplex Network Embedding](https://arxiv.org/abs/1911.06750) (AAAI 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/hetero/dmgi_unsup.py)] +* **[Masked Label Prediction](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.MaskLabel.html)** from Shi *et al.*: [Masked Label Prediction: Unified Message Passing Model for Semi-Supervised Classification](https://arxiv.org/abs/2009.03509) (CoRR 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/unimp_arxiv.py)] +* **[PMLP](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.PMLP.html)** from Yang *et al.*: [Graph Neural Networks are Inherently Good Generalizers: Insights by Bridging GNNs and MLPs](https://arxiv.org/abs/2212.09034) (ICLR 2023) + +
+Expand to see all implemented GNN models... + +* **[Jumping Knowledge](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.JumpingKnowledge.html)** from Xu *et al.*: [Representation Learning on Graphs with Jumping Knowledge Networks](https://arxiv.org/abs/1806.03536) (ICML 2018) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/benchmark/kernel/gin.py#L54-L106)] +* A **[MetaLayer](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.MetaLayer.html)** for building any kind of graph network similar to the [TensorFlow Graph Nets library](https://github.com/deepmind/graph_nets) from Battaglia *et al.*: [Relational Inductive Biases, Deep Learning, and Graph Networks](https://arxiv.org/abs/1806.01261) (CoRR 2018) +* **[MetaPath2Vec](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.MetaPath2Vec.html)** from Dong *et al.*: [metapath2vec: Scalable Representation Learning for Heterogeneous Networks](https://ericdongyx.github.io/papers/KDD17-dong-chawla-swami-metapath2vec.pdf) (KDD 2017) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/hetero/metapath2vec.py)] +* All variants of **[Graph Autoencoders](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.GAE.html)** and **[Variational Autoencoders](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.VGAE.html)** from: + * [Variational Graph Auto-Encoders](https://arxiv.org/abs/1611.07308) from Kipf and Welling (NIPS-W 2016) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/autoencoder.py)] + * [Adversarially Regularized Graph Autoencoder for Graph Embedding](https://arxiv.org/abs/1802.04407) from Pan *et al.* (IJCAI 2018) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/argva_node_clustering.py)] + * [Simple and Effective Graph Autoencoders with One-Hop Linear Models](https://arxiv.org/abs/2001.07614) from Salha *et al.* (ECML 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/autoencoder.py)] +* **[SEAL](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/seal_link_pred.py)** from Zhang and Chen: [Link Prediction Based on Graph Neural Networks](https://arxiv.org/pdf/1802.09691.pdf) (NeurIPS 2018) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/seal_link_pred.py)] +* **[RENet](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.RENet.html)** from Jin *et al.*: [Recurrent Event Network for Reasoning over Temporal Knowledge Graphs](https://arxiv.org/abs/1904.05530) (ICLR-W 2019) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/renet.py)] +* **[GraphUNet](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.GraphUNet.html)** from Gao and Ji: [Graph U-Nets](https://arxiv.org/abs/1905.05178) (ICML 2019) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/graph_unet.py)] +* **[AttentiveFP](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.AttentiveFP.html)** from Xiong *et al.*: [Pushing the Boundaries of Molecular Representation for Drug Discovery with the Graph Attention Mechanism](https://pubs.acs.org/doi/10.1021/acs.jmedchem.9b00959) (J. Med. Chem. 
2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/attentive_fp.py)] +* **[DeepGCN](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.DeepGCNLayer.html)** and the **[GENConv](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.GENConv.html)** from Li *et al.*: [DeepGCNs: Can GCNs Go as Deep as CNNs?](https://arxiv.org/abs/1904.03751) (ICCV 2019) and [DeeperGCN: All You Need to Train Deeper GCNs](https://arxiv.org/abs/2006.07739) (CoRR 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/ogbn_proteins_deepgcn.py)] +* **[RECT](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.RECT_L.html)** from Wang *et al.*: [Network Embedding with Completely-imbalanced Labels](https://ieeexplore.ieee.org/document/8979355) (TKDE 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/rect.py)] +* **[GNNExplainer](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.explain.algorithm.GNNExplainer.html)** from Ying *et al.*: [GNNExplainer: Generating Explanations for Graph Neural Networks](https://arxiv.org/abs/1903.03894) (NeurIPS 2019) [[**Example1**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/explain/gnn_explainer.py), [**Example2**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/explain/gnn_explainer_ba_shapes.py), [**Example3**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/explain/gnn_explainer_link_pred.py)] +* **Graph-less Neural Networks** from Zhang *et al.*: [Graph-less Neural Networks: Teaching Old MLPs New Tricks via Distillation](https://arxiv.org/abs/2110.08727) (CoRR 2021) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/glnn.py)] +* **[LINKX](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.LINKX.html)** from Lim *et al.*: [Large Scale Learning on Non-Homophilous Graphs: +New Benchmarks and Strong Simple Methods](https://arxiv.org/abs/2110.14446) (NeurIPS 2021) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/linkx.py)] +* **[RevGNN](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.GroupAddRev.html)** from Li *et al.*: [Training Graph Neural Networks with 1000 Layers](https://arxiv.org/abs/2106.07476) (ICML 2021) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/rev_gnn.py)] +* **[TransE](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.kge.TransE.html)** from Bordes *et al.*: [Translating Embeddings for Modeling Multi-Relational Data](https://proceedings.neurips.cc/paper/2013/file/1cecc7a77928ca8133fa24680a88d2f9-Paper.pdf) (NIPS 2013) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/kge_fb15k_237.py)] +* **[ComplEx](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.kge.ComplEx.html)** from Trouillon *et al.*: [Complex Embeddings for Simple Link Prediction](https://arxiv.org/abs/1606.06357) (ICML 2016) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/kge_fb15k_237.py)] +* **[DistMult](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.kge.DistMult.html)** from Yang *et al.*: [Embedding Entities and Relations for Learning and Inference in Knowledge
+* **[RotatE](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.kge.RotatE.html)** from Sun *et al.*: [RotatE: Knowledge Graph Embedding by Relational Rotation in Complex Space](https://arxiv.org/abs/1902.10197) (ICLR 2019) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/kge_fb15k_237.py)]
+
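+To give a flavor of how such models are assembled from standard PyG building blocks, the following is a minimal, illustrative sketch (not taken from the examples above) that wires a two-layer GCN encoder into the `GAE` wrapper; the `dataset` and `data` objects are assumed to come from any PyG dataset, and the linked `autoencoder.py` example contains a complete training script:
+
+```
+import torch
+from torch_geometric.nn import GAE, GCNConv
+
+class Encoder(torch.nn.Module):
+    def __init__(self, in_channels, out_channels):
+        super().__init__()
+        self.conv1 = GCNConv(in_channels, 2 * out_channels)
+        self.conv2 = GCNConv(2 * out_channels, out_channels)
+
+    def forward(self, x, edge_index):
+        x = self.conv1(x, edge_index).relu()
+        return self.conv2(x, edge_index)
+
+# `dataset` and `data` are assumed to be given, e.g., from a Planetoid dataset:
+model = GAE(Encoder(dataset.num_features, out_channels=16))
+z = model.encode(data.x, data.edge_index)    # latent node embeddings
+loss = model.recon_loss(z, data.edge_index)  # edge reconstruction loss
+```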
+
+**GNN operators and utilities:**
+PyG comes with a rich set of neural network operators that are commonly used in many GNN models.
+They follow an extensible design: it is easy to apply these operators and graph utilities to existing GNN layers and models to further enhance model performance; a minimal usage sketch follows the expandable list below.
+
+* **[DropEdge](https://pytorch-geometric.readthedocs.io/en/latest/modules/utils.html#torch_geometric.utils.dropout_edge)** from Rong *et al.*: [DropEdge: Towards Deep Graph Convolutional Networks on Node Classification](https://openreview.net/forum?id=Hkx1qkrKPr) (ICLR 2020)
+* **[DropNode](https://pytorch-geometric.readthedocs.io/en/latest/modules/utils.html#torch_geometric.utils.dropout_node)**, **[MaskFeature](https://pytorch-geometric.readthedocs.io/en/latest/modules/utils.html#torch_geometric.utils.mask_feature)** and **[AddRandomEdge](https://pytorch-geometric.readthedocs.io/en/latest/modules/utils.html#torch_geometric.utils.add_random_edge)** from You *et al.*: [Graph Contrastive Learning with Augmentations](https://arxiv.org/abs/2010.13902) (NeurIPS 2020)
+* **[DropPath](https://pytorch-geometric.readthedocs.io/en/latest/modules/utils.html#torch_geometric.utils.dropout_path)** from Li *et al.*: [MaskGAE: Masked Graph Modeling Meets Graph Autoencoders](https://arxiv.org/abs/2205.10053) (arXiv 2022)
+* **[ShuffleNode](https://pytorch-geometric.readthedocs.io/en/latest/modules/utils.html#torch_geometric.utils.shuffle_node)** from Veličković *et al.*: [Deep Graph Infomax](https://arxiv.org/abs/1809.10341) (ICLR 2019)
+* **[GraphNorm](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.norm.GraphNorm.html)** from Cai *et al.*: [GraphNorm: A Principled Approach to Accelerating Graph Neural Network Training](https://proceedings.mlr.press/v139/cai21e.html) (ICML 2021)
+* **[GDC](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.transforms.GDC.html)** from Klicpera *et al.*: [Diffusion Improves Graph Learning](https://arxiv.org/abs/1911.05485) (NeurIPS 2019) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/gcn.py)]
+
+Expand to see all implemented GNN operators and utilities... + +* **[GraphSizeNorm](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.norm.GraphSizeNorm.html)** from Dwivedi *et al.*: [Benchmarking Graph Neural Networks](https://arxiv.org/abs/2003.00982) (CoRR 2020) +* **[PairNorm](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.norm.PairNorm.html)** from Zhao and Akoglu: [PairNorm: Tackling Oversmoothing in GNNs](https://arxiv.org/abs/1909.12223) (ICLR 2020) +* **[MeanSubtractionNorm](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.norm.MeanSubtractionNorm.html)** from Yang *et al.*: [Revisiting "Over-smoothing" in Deep GCNs](https://arxiv.org/abs/2003.13663) (CoRR 2020) +* **[DiffGroupNorm](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.norm.DiffGroupNorm.html)** from Zhou *et al.*: [Towards Deeper Graph Neural Networks with Differentiable Group Normalization](https://arxiv.org/abs/2006.06972) (NeurIPS 2020) +* **[Tree Decomposition](https://pytorch-geometric.readthedocs.io/en/latest/modules/utils.html#torch_geometric.utils.tree_decomposition)** from Jin *et al.*: [Junction Tree Variational Autoencoder for Molecular Graph Generation](https://arxiv.org/abs/1802.04364) (ICML 2018) +* **[TGN](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.TGNMemory.html)** from Rossi *et al.*: [Temporal Graph Networks for Deep Learning on Dynamic Graphs](https://arxiv.org/abs/2006.10637) (GRL+ 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/tgn.py)] +* **[Weisfeiler Lehman Operator](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.WLConv.html)** from Weisfeiler and Lehman: [A Reduction of a Graph to a Canonical Form and an Algebra Arising During this Reduction](https://www.iti.zcu.cz/wl2018/pdf/wl_paper_translation.pdf) (Nauchno-Technicheskaya Informatsia 1968) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/wl_kernel.py)] +* **[Continuous Weisfeiler Lehman Operator](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.WLConvContinuous.html)** from Togninalli *et al.*: [Wasserstein Weisfeiler-Lehman Graph Kernels](https://arxiv.org/abs/1906.01277) (NeurIPS 2019) +* **[Label Propagation](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.LabelPropagation.html)** from Zhu and Ghahramani: [Learning from Labeled and Unlabeled Data with Label Propagation](http://mlg.eng.cam.ac.uk/zoubin/papers/CMU-CALD-02-107.pdf) (CMU-CALD 2002) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/label_prop.py)] +* **[Local Degree Profile](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.transforms.LocalDegreeProfile)** from Cai and Wang: [A Simple yet Effective Baseline for Non-attribute Graph Classification](https://arxiv.org/abs/1811.03508) (CoRR 2018) +* **[CorrectAndSmooth](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.models.CorrectAndSmooth.html)** from Huang *et al.*: [Combining Label Propagation And Simple Models Out-performs Graph Neural Networks](https://arxiv.org/abs/2010.13993) (CoRR 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/correct_and_smooth.py)] +* 
**[Gini](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.functional.gini.html)** and **[BRO](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.functional.bro.html)** regularization from Henderson *et al.*: [Improving Molecular Graph Neural Network Explainability with Orthonormalization and Induced Sparsity](https://arxiv.org/abs/2105.04854) (ICML 2021) +* **[RootedEgoNets](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.transforms.RootedEgoNets)** and **[RootedRWSubgraph](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.transforms.RootedRWSubgraph)** from Zhao *et al.*: [From Stars to Subgraphs: Uplifting Any GNN with Local Structure Awareness](https://arxiv.org/abs/2110.03753) (ICLR 2022) +* **[FeaturePropagation](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.transforms.FeaturePropagation)** from Rossi *et al.*: [On the Unreasonable Effectiveness of Feature Propagation in Learning on Graphs with Missing Node Features](https://arxiv.org/abs/2111.12128) (CoRR 2021) +
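+As the sketch promised above, the dropout-style utilities are plain functions over an `edge_index` tensor of shape `[2, num_edges]` and can be dropped into an existing `forward` pass; the tiny graph here is made up purely for illustration:
+
+```
+import torch
+from torch_geometric.utils import dropout_edge, dropout_node
+
+edge_index = torch.tensor([[0, 1, 1, 2],
+                           [1, 0, 2, 1]])
+
+# DropEdge: randomly remove individual edges with probability p:
+edge_index_sub, edge_mask = dropout_edge(edge_index, p=0.5)
+
+# DropNode: remove all edges incident to randomly sampled nodes:
+edge_index_sub, edge_mask, node_mask = dropout_node(edge_index, p=0.5)
+```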
+
+**Scalable GNNs:**
+PyG supports the implementation of Graph Neural Networks that can scale to large-scale graphs.
+Such applications are challenging since the entire graph, its associated features, and the GNN parameters cannot fit into GPU memory.
+Many state-of-the-art scalability approaches tackle this challenge by sampling neighborhoods for mini-batch training, by clustering and partitioning the graph, or by using simplified GNN models.
+These approaches have been implemented in PyG, and can benefit from the above GNN layers, operators and models; a minimal sampling sketch follows the expandable list below.
+
+* **[NeighborLoader](https://pytorch-geometric.readthedocs.io/en/latest/modules/loader.html#torch_geometric.loader.NeighborLoader)** from Hamilton *et al.*: [Inductive Representation Learning on Large Graphs](https://arxiv.org/abs/1706.02216) (NIPS 2017) [[**Example1**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/reddit.py), [**Example2**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/ogbn_products_sage.py), [**Example3**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/ogbn_products_gat.py), [**Example4**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/hetero/to_hetero_mag.py)]
+* **[ClusterGCN](https://pytorch-geometric.readthedocs.io/en/latest/modules/loader.html#torch_geometric.loader.ClusterLoader)** from Chiang *et al.*: [Cluster-GCN: An Efficient Algorithm for Training Deep and Large Graph Convolutional Networks](https://arxiv.org/abs/1905.07953) (KDD 2019) [[**Example1**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/cluster_gcn_reddit.py), [**Example2**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/cluster_gcn_ppi.py)]
+* **[GraphSAINT](https://pytorch-geometric.readthedocs.io/en/latest/modules/loader.html#torch_geometric.loader.GraphSAINTSampler)** from Zeng *et al.*: [GraphSAINT: Graph Sampling Based Inductive Learning Method](https://arxiv.org/abs/1907.04931) (ICLR 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/graph_saint.py)]
+
+Expand to see all implemented scalable GNNs... + +* **[ShaDow](https://pytorch-geometric.readthedocs.io/en/latest/modules/loader.html#torch_geometric.loader.ShaDowKHopSampler)** from Zeng *et al.*: [Decoupling the Depth and Scope of Graph Neural Networks](https://arxiv.org/abs/2201.07858) (NeurIPS 2021) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/shadow.py)] +* **[SIGN](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.transforms.SIGN.html)** from Rossi *et al.*: [SIGN: Scalable Inception Graph Neural Networks](https://arxiv.org/abs/2004.11198) (CoRR 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/sign.py)] +* **[HGTLoader](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.loader.HGTLoader.html)** from Hu *et al.*: [Heterogeneous Graph Transformer](https://arxiv.org/abs/2003.01332) (WWW 2020) [[**Example**](https://github.com/pyg-team/pytorch_geometric/blob/master/examples/hetero/to_hetero_mag.py)] +
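+As the sketch promised above, mini-batch neighbor sampling with `NeighborLoader` looks roughly as follows; the homogeneous `data` object with a boolean `train_mask`, the `model`, and the target labels are assumed to exist (see the linked Reddit and OGB examples for complete scripts):
+
+```
+import torch.nn.functional as F
+from torch_geometric.loader import NeighborLoader
+
+loader = NeighborLoader(
+    data,
+    num_neighbors=[25, 10],       # 25 neighbors per seed node, then 10 per hop-1 node
+    batch_size=128,
+    input_nodes=data.train_mask,  # seed nodes to compute representations for
+)
+
+for batch in loader:
+    out = model(batch.x, batch.edge_index)
+    # The first `batch.batch_size` nodes of each mini-batch are the seed nodes:
+    loss = F.cross_entropy(out[:batch.batch_size], batch.y[:batch.batch_size])
+```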
+
+## Installation
+
+PyG is available for Python 3.8 to Python 3.11.
+
+### Anaconda
+
+You can now install PyG via [Anaconda](https://anaconda.org/pyg/pyg) for all major OS/PyTorch/CUDA combinations 🤗
+If you have not yet installed PyTorch, install it via `conda` as described in the [official PyTorch documentation](https://pytorch.org/get-started/locally/).
+Given that you have PyTorch installed (`>=1.8.0`), simply run
+
+```
+conda install pyg -c pyg
+```
+
+### PyPI
+
+From **PyG 2.3** onwards, you can install and use PyG **without any external library** required except for PyTorch.
+For this, simply run
+
+```
+pip install torch_geometric
+```
+
+PyG 2.3 requires that at least PyTorch 1.11 is installed.
+
+### Additional Libraries
+
+If you want to utilize the full set of features from PyG, there exist several additional libraries you may want to install:
+
+* **[`pyg-lib`](https://github.com/pyg-team/pyg-lib)**: Heterogeneous GNN operators and graph sampling routines
+* **[`torch-scatter`](https://github.com/rusty1s/pytorch_scatter)**: Accelerated and efficient sparse reductions
+* **[`torch-sparse`](https://github.com/rusty1s/pytorch_sparse)**: [`SparseTensor`](https://pytorch-geometric.readthedocs.io/en/latest/advanced/sparse_tensor.html) support
+* **[`torch-cluster`](https://github.com/rusty1s/pytorch_cluster)**: Graph clustering routines
+* **[`torch-spline-conv`](https://github.com/rusty1s/pytorch_spline_conv)**: [`SplineConv`](https://pytorch-geometric.readthedocs.io/en/latest/generated/torch_geometric.nn.conv.SplineConv.html) support
+
+These packages come with their own CPU and GPU kernel implementations based on the [PyTorch C++/CUDA/hip(ROCm) extension interface](https://github.com/pytorch/extension-cpp).
+For a basic usage of PyG, these dependencies are **fully optional**.
+We recommend starting with a minimal installation and installing additional dependencies once you actually need them.
+
+For ease of installation of these extensions, we provide `pip` wheels for all major OS/PyTorch/CUDA combinations; see [here](https://data.pyg.org/whl).
+
+#### PyTorch 2.1
+
+To install the binaries for PyTorch 2.1.0, simply run
+
+```
+pip install pyg_lib torch_scatter torch_sparse torch_cluster torch_spline_conv -f https://data.pyg.org/whl/torch-2.1.0+${CUDA}.html
+```
+
+where `${CUDA}` should be replaced by either `cpu`, `cu118`, or `cu121` depending on your PyTorch installation.
+
+|             | `cpu` | `cu118` | `cu121` |
+|-------------|-------|---------|---------|
+| **Linux**   | ✅    | ✅      | ✅      |
+| **Windows** | ✅    | ✅      | ✅      |
+| **macOS**   | ✅    |         |         |
+
+#### PyTorch 2.0
+
+To install the binaries for PyTorch 2.0.0, simply run
+
+```
+pip install pyg_lib torch_scatter torch_sparse torch_cluster torch_spline_conv -f https://data.pyg.org/whl/torch-2.0.0+${CUDA}.html
+```
+
+where `${CUDA}` should be replaced by either `cpu`, `cu117`, or `cu118` depending on your PyTorch installation.
+
+|             | `cpu` | `cu117` | `cu118` |
+|-------------|-------|---------|---------|
+| **Linux**   | ✅    | ✅      | ✅      |
+| **Windows** | ✅    | ✅      | ✅      |
+| **macOS**   | ✅    |         |         |
+
+**Note:** Binaries of older versions are also provided for PyTorch 1.4.0, PyTorch 1.5.0, PyTorch 1.6.0, PyTorch 1.7.0/1.7.1, PyTorch 1.8.0/1.8.1, PyTorch 1.9.0, PyTorch 1.10.0/1.10.1/1.10.2, PyTorch 1.11.0, PyTorch 1.12.0/1.12.1 and PyTorch 1.13.0/1.13.1 (following the same procedure).
+**For older versions, you might need to explicitly specify the latest supported version number** or install via `pip install --no-index` to prevent pip from accidentally falling back to a build from source.
+You can look up the latest supported version number [here](https://data.pyg.org/whl).
+
+### Nightly and Master
+
+If you want to experiment with the latest PyG features that are not fully released yet, either install the **nightly version** of PyG via
+
+```
+pip install pyg-nightly
+```
+
+or install PyG **from master** via
+
+```
+pip install git+https://github.com/pyg-team/pytorch_geometric.git
+```
+
+### ROCm Wheels
+
+The external [`pyg-rocm-build` repository](https://github.com/Looong01/pyg-rocm-build) provides wheels and detailed instructions on how to install PyG for ROCm.
+If you have any questions about it, please open an issue [here](https://github.com/Looong01/pyg-rocm-build/issues).
+
+## Cite
+
+Please cite [our paper](https://arxiv.org/abs/1903.02428) (and the respective papers of the methods used) if you use this code in your own work:
+
+```
+@inproceedings{Fey/Lenssen/2019,
+  title={Fast Graph Representation Learning with {PyTorch Geometric}},
+  author={Fey, Matthias and Lenssen, Jan E.},
+  booktitle={ICLR Workshop on Representation Learning on Graphs and Manifolds},
+  year={2019},
+}
+```
+
+Feel free to [email us](mailto:matthias.fey@tu-dortmund.de) if you wish your work to be listed in the [external resources](https://pytorch-geometric.readthedocs.io/en/latest/external/resources.html).
+If you notice anything unexpected, please open an [issue](https://github.com/pyg-team/pytorch_geometric/issues) and let us know.
+If you have any questions or are missing a specific feature, feel free [to discuss them with us](https://github.com/pyg-team/pytorch_geometric/discussions).
+We are motivated to constantly make PyG even better.
diff --git a/pytorch_geometric-2.3.1/benchmark/README.md b/pytorch_geometric-2.4.0/benchmark/README.md similarity index 100% rename from pytorch_geometric-2.3.1/benchmark/README.md rename to pytorch_geometric-2.4.0/benchmark/README.md diff --git a/pytorch_geometric-2.3.1/benchmark/citation/README.md b/pytorch_geometric-2.4.0/benchmark/citation/README.md similarity index 100% rename from pytorch_geometric-2.3.1/benchmark/citation/README.md rename to pytorch_geometric-2.4.0/benchmark/citation/README.md diff --git a/pytorch_geometric-2.3.1/benchmark/citation/__init__.py b/pytorch_geometric-2.4.0/benchmark/citation/__init__.py similarity index 100% rename from pytorch_geometric-2.3.1/benchmark/citation/__init__.py rename to pytorch_geometric-2.4.0/benchmark/citation/__init__.py diff --git a/pytorch_geometric-2.3.1/benchmark/citation/appnp.py b/pytorch_geometric-2.4.0/benchmark/citation/appnp.py similarity index 97% rename from pytorch_geometric-2.3.1/benchmark/citation/appnp.py rename to pytorch_geometric-2.4.0/benchmark/citation/appnp.py index 8bb9add..53c56be 100644 --- a/pytorch_geometric-2.3.1/benchmark/citation/appnp.py +++ b/pytorch_geometric-2.4.0/benchmark/citation/appnp.py @@ -24,6 +24,7 @@ parser.add_argument('--inference', action='store_true') parser.add_argument('--profile', action='store_true') parser.add_argument('--bf16', action='store_true') +parser.add_argument('--compile', action='store_true') args = parser.parse_args() @@ -51,7 +52,7 @@ def forward(self, data): dataset = get_planetoid_dataset(args.dataset, not args.no_normalize_features) permute_masks = random_planetoid_splits if args.random_splits else None run(dataset, Net(dataset), args.runs, args.epochs, args.lr, args.weight_decay, - args.early_stopping, args.inference, args.profile, args.bf16, + args.early_stopping, args.inference, args.profile, args.bf16, args.compile, permute_masks) if args.profile: diff --git a/pytorch_geometric-2.4.0/benchmark/citation/arma.py b/pytorch_geometric-2.4.0/benchmark/citation/arma.py new file mode 100644 index 0000000..b39b584 --- /dev/null +++ b/pytorch_geometric-2.4.0/benchmark/citation/arma.py @@ -0,0 +1,63 @@ +import argparse + +import torch +import torch.nn.functional as F +from citation import get_planetoid_dataset, random_planetoid_splits, run + +from torch_geometric.nn import ARMAConv +from torch_geometric.profile import rename_profile_file + +parser = argparse.ArgumentParser() +parser.add_argument('--dataset', type=str, required=True) +parser.add_argument('--random_splits', action='store_true') +parser.add_argument('--runs', type=int, default=100) +parser.add_argument('--epochs', type=int, default=1000) +parser.add_argument('--lr', type=float, default=0.01) +parser.add_argument('--weight_decay', type=float, default=0.0005) +parser.add_argument('--early_stopping', type=int, default=100) +parser.add_argument('--hidden', type=int, default=16) +parser.add_argument('--dropout', type=float, default=0.5) +parser.add_argument('--no_normalize_features', action='store_true') +parser.add_argument('--num_stacks', type=int, default=1) +parser.add_argument('--num_layers', type=int, default=1) +parser.add_argument('--shared_weights', action='store_true') +parser.add_argument('--skip_dropout', type=float, default=0.75) +parser.add_argument('--inference', action='store_true') +parser.add_argument('--profile', action='store_true') +parser.add_argument('--bf16', action='store_true') +parser.add_argument('--compile', action='store_true') +args = parser.parse_args() + + +class Net(torch.nn.Module): 
+ def __init__(self, dataset): + super().__init__() + self.conv1 = ARMAConv(dataset.num_features, args.hidden, + args.num_stacks, args.num_layers, + args.shared_weights, dropout=args.skip_dropout) + self.conv2 = ARMAConv(args.hidden, dataset.num_classes, + args.num_stacks, args.num_layers, + args.shared_weights, dropout=args.skip_dropout) + + def reset_parameters(self): + self.conv1.reset_parameters() + self.conv2.reset_parameters() + + def forward(self, data): + x, edge_index = data.x, data.edge_index + x = F.relu(self.conv1(x, edge_index)) + x = F.dropout(x, p=args.dropout, training=self.training) + x = self.conv2(x, edge_index) + return F.log_softmax(x, dim=1) + + +dataset = get_planetoid_dataset(args.dataset, not args.no_normalize_features) +permute_masks = random_planetoid_splits if args.random_splits else None +run(dataset, Net(dataset), args.runs, args.epochs, args.lr, args.weight_decay, + args.early_stopping, args.inference, args.profile, args.bf16, args.compile, + permute_masks) + +if args.profile: + rename_profile_file('citation', ARMAConv.__name__, args.dataset, + str(args.random_splits), + 'inference' if args.inference else 'train') diff --git a/pytorch_geometric-2.3.1/benchmark/citation/cheb.py b/pytorch_geometric-2.4.0/benchmark/citation/cheb.py similarity index 96% rename from pytorch_geometric-2.3.1/benchmark/citation/cheb.py rename to pytorch_geometric-2.4.0/benchmark/citation/cheb.py index 46f7d53..04b9868 100644 --- a/pytorch_geometric-2.3.1/benchmark/citation/cheb.py +++ b/pytorch_geometric-2.4.0/benchmark/citation/cheb.py @@ -22,6 +22,7 @@ parser.add_argument('--inference', action='store_true') parser.add_argument('--profile', action='store_true') parser.add_argument('--bf16', action='store_true') +parser.add_argument('--compile', action='store_true') args = parser.parse_args() @@ -46,7 +47,7 @@ def forward(self, data): dataset = get_planetoid_dataset(args.dataset, not args.no_normalize_features) permute_masks = random_planetoid_splits if args.random_splits else None run(dataset, Net(dataset), args.runs, args.epochs, args.lr, args.weight_decay, - args.early_stopping, args.inference, args.profile, args.bf16, + args.early_stopping, args.inference, args.profile, args.bf16, args.compile, permute_masks) if args.profile: diff --git a/pytorch_geometric-2.3.1/benchmark/citation/datasets.py b/pytorch_geometric-2.4.0/benchmark/citation/datasets.py similarity index 100% rename from pytorch_geometric-2.3.1/benchmark/citation/datasets.py rename to pytorch_geometric-2.4.0/benchmark/citation/datasets.py diff --git a/pytorch_geometric-2.4.0/benchmark/citation/gat.py b/pytorch_geometric-2.4.0/benchmark/citation/gat.py new file mode 100644 index 0000000..f9ed5d6 --- /dev/null +++ b/pytorch_geometric-2.4.0/benchmark/citation/gat.py @@ -0,0 +1,61 @@ +import argparse + +import torch +import torch.nn.functional as F +from citation import get_planetoid_dataset, random_planetoid_splits, run + +from torch_geometric.nn import GATConv +from torch_geometric.profile import rename_profile_file + +parser = argparse.ArgumentParser() +parser.add_argument('--dataset', type=str, required=True) +parser.add_argument('--random_splits', action='store_true') +parser.add_argument('--runs', type=int, default=100) +parser.add_argument('--epochs', type=int, default=1000) +parser.add_argument('--lr', type=float, default=0.005) +parser.add_argument('--weight_decay', type=float, default=0.0005) +parser.add_argument('--early_stopping', type=int, default=100) +parser.add_argument('--hidden', type=int, default=8) 
+parser.add_argument('--dropout', type=float, default=0.6) +parser.add_argument('--no_normalize_features', action='store_true') +parser.add_argument('--heads', type=int, default=8) +parser.add_argument('--output_heads', type=int, default=1) +parser.add_argument('--inference', action='store_true') +parser.add_argument('--profile', action='store_true') +parser.add_argument('--bf16', action='store_true') +parser.add_argument('--compile', action='store_true') +args = parser.parse_args() + + +class Net(torch.nn.Module): + def __init__(self, dataset): + super().__init__() + self.conv1 = GATConv(dataset.num_features, args.hidden, + heads=args.heads, dropout=args.dropout) + self.conv2 = GATConv(args.hidden * args.heads, dataset.num_classes, + heads=args.output_heads, concat=False, + dropout=args.dropout) + + def reset_parameters(self): + self.conv1.reset_parameters() + self.conv2.reset_parameters() + + def forward(self, data): + x, edge_index = data.x, data.edge_index + x = F.dropout(x, p=args.dropout, training=self.training) + x = F.elu(self.conv1(x, edge_index)) + x = F.dropout(x, p=args.dropout, training=self.training) + x = self.conv2(x, edge_index) + return F.log_softmax(x, dim=1) + + +dataset = get_planetoid_dataset(args.dataset, not args.no_normalize_features) +permute_masks = random_planetoid_splits if args.random_splits else None +run(dataset, Net(dataset), args.runs, args.epochs, args.lr, args.weight_decay, + args.early_stopping, args.inference, args.profile, args.bf16, args.compile, + permute_masks) + +if args.profile: + rename_profile_file('citation', GATConv.__name__, args.dataset, + str(args.random_splits), + 'inference' if args.inference else 'train') diff --git a/pytorch_geometric-2.4.0/benchmark/citation/gcn.py b/pytorch_geometric-2.4.0/benchmark/citation/gcn.py new file mode 100644 index 0000000..589d836 --- /dev/null +++ b/pytorch_geometric-2.4.0/benchmark/citation/gcn.py @@ -0,0 +1,55 @@ +import argparse + +import torch +import torch.nn.functional as F +from citation import get_planetoid_dataset, random_planetoid_splits, run + +from torch_geometric.nn import GCNConv +from torch_geometric.profile import rename_profile_file + +parser = argparse.ArgumentParser() +parser.add_argument('--dataset', type=str, required=True) +parser.add_argument('--random_splits', action='store_true') +parser.add_argument('--runs', type=int, default=100) +parser.add_argument('--epochs', type=int, default=200) +parser.add_argument('--lr', type=float, default=0.01) +parser.add_argument('--weight_decay', type=float, default=0.0005) +parser.add_argument('--early_stopping', type=int, default=10) +parser.add_argument('--hidden', type=int, default=16) +parser.add_argument('--dropout', type=float, default=0.5) +parser.add_argument('--no_normalize_features', action='store_true') +parser.add_argument('--inference', action='store_true') +parser.add_argument('--profile', action='store_true') +parser.add_argument('--bf16', action='store_true') +parser.add_argument('--compile', action='store_true') +args = parser.parse_args() + + +class Net(torch.nn.Module): + def __init__(self, dataset): + super().__init__() + self.conv1 = GCNConv(dataset.num_features, args.hidden) + self.conv2 = GCNConv(args.hidden, dataset.num_classes) + + def reset_parameters(self): + self.conv1.reset_parameters() + self.conv2.reset_parameters() + + def forward(self, data): + x, edge_index = data.x, data.edge_index + x = F.relu(self.conv1(x, edge_index)) + x = F.dropout(x, p=args.dropout, training=self.training) + x = self.conv2(x, edge_index) + 
return F.log_softmax(x, dim=1) + + +dataset = get_planetoid_dataset(args.dataset, not args.no_normalize_features) +permute_masks = random_planetoid_splits if args.random_splits else None +run(dataset, Net(dataset), args.runs, args.epochs, args.lr, args.weight_decay, + args.early_stopping, args.inference, args.profile, args.bf16, args.compile, + permute_masks) + +if args.profile: + rename_profile_file('citation', GCNConv.__name__, args.dataset, + str(args.random_splits), + 'inference' if args.inference else 'train') diff --git a/pytorch_geometric-2.3.1/benchmark/citation/inference.sh b/pytorch_geometric-2.4.0/benchmark/citation/inference.sh similarity index 100% rename from pytorch_geometric-2.3.1/benchmark/citation/inference.sh rename to pytorch_geometric-2.4.0/benchmark/citation/inference.sh diff --git a/pytorch_geometric-2.3.1/benchmark/citation/run.sh b/pytorch_geometric-2.4.0/benchmark/citation/run.sh similarity index 100% rename from pytorch_geometric-2.3.1/benchmark/citation/run.sh rename to pytorch_geometric-2.4.0/benchmark/citation/run.sh diff --git a/pytorch_geometric-2.4.0/benchmark/citation/sgc.py b/pytorch_geometric-2.4.0/benchmark/citation/sgc.py new file mode 100644 index 0000000..a11e177 --- /dev/null +++ b/pytorch_geometric-2.4.0/benchmark/citation/sgc.py @@ -0,0 +1,51 @@ +import argparse + +import torch +import torch.nn.functional as F +from citation import get_planetoid_dataset, random_planetoid_splits, run + +from torch_geometric.nn import SGConv +from torch_geometric.profile import rename_profile_file + +parser = argparse.ArgumentParser() +parser.add_argument('--dataset', type=str, required=True) +parser.add_argument('--random_splits', action='store_true') +parser.add_argument('--runs', type=int, default=100) +parser.add_argument('--epochs', type=int, default=200) +parser.add_argument('--lr', type=float, default=0.1) +parser.add_argument('--weight_decay', type=float, default=0.0005) +parser.add_argument('--early_stopping', type=int, default=10) +parser.add_argument('--no_normalize_features', action='store_true') +parser.add_argument('--K', type=int, default=2) +parser.add_argument('--inference', action='store_true') +parser.add_argument('--profile', action='store_true') +parser.add_argument('--bf16', action='store_true') +parser.add_argument('--compile', action='store_true') +args = parser.parse_args() + + +class Net(torch.nn.Module): + def __init__(self, dataset): + super().__init__() + self.conv1 = SGConv(dataset.num_features, dataset.num_classes, + K=args.K, cached=True) + + def reset_parameters(self): + self.conv1.reset_parameters() + + def forward(self, data): + x, edge_index = data.x, data.edge_index + x = self.conv1(x, edge_index) + return F.log_softmax(x, dim=1) + + +dataset = get_planetoid_dataset(args.dataset, not args.no_normalize_features) +permute_masks = random_planetoid_splits if args.random_splits else None +run(dataset, Net(dataset), args.runs, args.epochs, args.lr, args.weight_decay, + args.early_stopping, args.inference, args.profile, args.bf16, args.compile, + permute_masks) + +if args.profile: + rename_profile_file('citation', SGConv.__name__, args.dataset, + str(args.random_splits), + 'inference' if args.inference else 'train') diff --git a/pytorch_geometric-2.3.1/benchmark/citation/statistics.py b/pytorch_geometric-2.4.0/benchmark/citation/statistics.py similarity index 100% rename from pytorch_geometric-2.3.1/benchmark/citation/statistics.py rename to pytorch_geometric-2.4.0/benchmark/citation/statistics.py diff --git 
a/pytorch_geometric-2.4.0/benchmark/citation/train_eval.py b/pytorch_geometric-2.4.0/benchmark/citation/train_eval.py new file mode 100644 index 0000000..0e7a789 --- /dev/null +++ b/pytorch_geometric-2.4.0/benchmark/citation/train_eval.py @@ -0,0 +1,198 @@ +import time + +import torch +import torch.nn.functional as F +from torch import tensor +from torch.optim import Adam + +import torch_geometric +from torch_geometric.profile import timeit, torch_profile +from torch_geometric.utils import index_to_mask + +if torch.cuda.is_available(): + device = torch.device('cuda') +elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + device = torch.device('mps') +else: + device = torch.device('cpu') + + +def random_planetoid_splits(data, num_classes): + # Set new random planetoid splits: + # * 20 * num_classes labels for training + # * 500 labels for validation + # * 1000 labels for testing + + indices = [] + for i in range(num_classes): + index = (data.y == i).nonzero().view(-1) + index = index[torch.randperm(index.size(0))] + indices.append(index) + + train_index = torch.cat([i[:20] for i in indices], dim=0) + + rest_index = torch.cat([i[20:] for i in indices], dim=0) + rest_index = rest_index[torch.randperm(rest_index.size(0))] + + data.train_mask = index_to_mask(train_index, size=data.num_nodes) + data.val_mask = index_to_mask(rest_index[:500], size=data.num_nodes) + data.test_mask = index_to_mask(rest_index[500:1500], size=data.num_nodes) + + return data + + +def run_train(dataset, model, runs, epochs, lr, weight_decay, early_stopping, + profiling, use_compile, permute_masks=None, logger=None): + val_losses, accs, durations = [], [], [] + if use_compile: + model = torch_geometric.compile(model) + + for run in range(runs): + data = dataset[0] + if permute_masks is not None: + data = permute_masks(data, dataset.num_classes) + data = data.to(device) + + model.to(device).reset_parameters() + optimizer = Adam(model.parameters(), lr=lr, weight_decay=weight_decay) + + if torch.cuda.is_available(): + torch.cuda.synchronize() + elif hasattr(torch.backends, + 'mps') and torch.backends.mps.is_available(): + try: + import torch.mps + torch.mps.synchronize() + except ImportError: + pass + + t_start = time.perf_counter() + + best_val_loss = float('inf') + test_acc = 0 + val_loss_history = [] + + for epoch in range(1, epochs + 1): + if run == runs - 1 and epoch == epochs: + with timeit(): + train(model, optimizer, data) + else: + train(model, optimizer, data) + eval_info = evaluate(model, data) + eval_info['epoch'] = epoch + + if logger is not None: + logger(eval_info) + + if eval_info['val_loss'] < best_val_loss: + best_val_loss = eval_info['val_loss'] + test_acc = eval_info['test_acc'] + + val_loss_history.append(eval_info['val_loss']) + if early_stopping > 0 and epoch > epochs // 2: + tmp = tensor(val_loss_history[-(early_stopping + 1):-1]) + if eval_info['val_loss'] > tmp.mean().item(): + break + + if torch.cuda.is_available(): + torch.cuda.synchronize() + elif hasattr(torch.backends, + 'mps') and torch.backends.mps.is_available(): + try: + import torch.mps + torch.mps.synchronize() + except ImportError: + pass + + t_end = time.perf_counter() + + val_losses.append(best_val_loss) + accs.append(test_acc) + durations.append(t_end - t_start) + loss, acc, duration = tensor(val_losses), tensor(accs), tensor(durations) + + print(f'Val Loss: {float(loss.mean()):.4f}, ' + f'Test Accuracy: {float(acc.mean()):.3f} ± {float(acc.std()):.3f}, ' + f'Duration: {float(duration.mean()):.3f}s') + + if 
profiling: + with torch_profile(): + train(model, optimizer, data) + + +@torch.no_grad() +def run_inference(dataset, model, epochs, profiling, bf16, use_compile, + permute_masks=None, logger=None): + data = dataset[0] + if permute_masks is not None: + data = permute_masks(data, dataset.num_classes) + data = data.to(device) + + model.to(device).reset_parameters() + if use_compile: + model = torch_geometric.compile(model) + + if torch.cuda.is_available(): + amp = torch.cuda.amp.autocast(enabled=False) + else: + amp = torch.cpu.amp.autocast(enabled=bf16) + if bf16: + data.x = data.x.to(torch.bfloat16) + + with amp: + for epoch in range(1, epochs + 1): + if epoch == epochs: + with timeit(): + inference(model, data) + else: + inference(model, data) + + if profiling: + with torch_profile(): + inference(model, data) + + +def run(dataset, model, runs, epochs, lr, weight_decay, early_stopping, + inference, profiling, bf16, use_compile, permute_masks=None, + logger=None): + if not inference: + run_train(dataset, model, runs, epochs, lr, weight_decay, + early_stopping, profiling, use_compile, permute_masks, + logger) + else: + run_inference(dataset, model, epochs, profiling, bf16, use_compile, + permute_masks, logger) + + +def train(model, optimizer, data): + model.train() + optimizer.zero_grad() + out = model(data) + loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask]) + loss.backward() + optimizer.step() + + +@torch.no_grad() +def evaluate(model, data): + model.eval() + + out = model(data) + + outs = {} + for key in ['train', 'val', 'test']: + mask = data[f'{key}_mask'] + loss = float(F.nll_loss(out[mask], data.y[mask])) + pred = out[mask].argmax(1) + acc = pred.eq(data.y[mask]).sum().item() / mask.sum().item() + + outs[f'{key}_loss'] = loss + outs[f'{key}_acc'] = acc + + return outs + + +@torch.no_grad() +def inference(model, data): + model.eval() + model(data) diff --git a/pytorch_geometric-2.3.1/benchmark/inference/README.md b/pytorch_geometric-2.4.0/benchmark/inference/README.md similarity index 100% rename from pytorch_geometric-2.3.1/benchmark/inference/README.md rename to pytorch_geometric-2.4.0/benchmark/inference/README.md diff --git a/pytorch_geometric-2.4.0/benchmark/inference/inference_benchmark.py b/pytorch_geometric-2.4.0/benchmark/inference/inference_benchmark.py new file mode 100644 index 0000000..35e8393 --- /dev/null +++ b/pytorch_geometric-2.4.0/benchmark/inference/inference_benchmark.py @@ -0,0 +1,339 @@ +import argparse +import warnings +from collections import defaultdict +from contextlib import nullcontext + +import torch + +from benchmark.utils import ( + emit_itt, + get_dataset_with_transformation, + get_model, + get_split_masks, + save_benchmark_data, + test, + write_to_csv, +) +from torch_geometric.loader import NeighborLoader +from torch_geometric.nn import PNAConv +from torch_geometric.profile import ( + rename_profile_file, + timeit, + torch_profile, + xpu_profile, +) + +supported_sets = { + 'ogbn-mag': ['rgat', 'rgcn'], + 'ogbn-products': ['edge_cnn', 'gat', 'gcn', 'pna', 'sage'], + 'Reddit': ['edge_cnn', 'gat', 'gcn', 'pna', 'sage'], +} + + +@torch.no_grad() +def full_batch_inference(model, data): + model.eval() + if hasattr(data, 'adj_t'): + edge_index = data.adj_t + else: + edge_index = data.edge_index + return model(data.x, edge_index) + + +def run(args: argparse.ArgumentParser): + csv_data = defaultdict(list) + + if args.write_csv == 'prof' and not args.profile: + warnings.warn("Cannot write profile data to CSV because profiling is " + "disabled") + + 
if args.device == 'xpu': + try: + import intel_extension_for_pytorch as ipex + except ImportError: + raise RuntimeError('XPU device requires IPEX to be installed') + + if ((args.device == 'cuda' and not torch.cuda.is_available()) + or (args.device == 'xpu' and not torch.xpu.is_available())): + raise RuntimeError(f'{args.device.upper()} is not available') + + if args.device == 'cuda' and args.full_batch: + raise RuntimeError('CUDA device is not suitable for full batch mode') + + device = torch.device(args.device) + + print('BENCHMARK STARTS') + print(f'Running on {args.device.upper()}') + for dataset_name in args.datasets: + assert dataset_name in supported_sets.keys( + ), f"Dataset {dataset_name} isn't supported." + print(f'Dataset: {dataset_name}') + load_time = timeit() if args.measure_load_time else nullcontext() + with load_time: + result = get_dataset_with_transformation(dataset_name, args.root, + args.use_sparse_tensor, + args.bf16) + dataset, num_classes, transformation = result + data = dataset.to(device) + hetero = True if dataset_name == 'ogbn-mag' else False + mask = ('paper', None) if dataset_name == 'ogbn-mag' else None + _, _, test_mask = get_split_masks(data, dataset_name) + degree = None + + if hetero and args.cached_loader: + args.cached_loader = False + print('Disabling CachedLoader, not supported in Hetero models') + if args.num_layers != [1] and not hetero and args.num_steps != -1: + raise ValueError("Layer-wise inference requires `steps=-1`") + + if args.device == 'cuda': + amp = torch.cuda.amp.autocast(enabled=False) + elif args.device == 'xpu': + amp = torch.xpu.amp.autocast(enabled=False) + else: + amp = torch.cpu.amp.autocast(enabled=args.bf16) + + if args.device == 'xpu' and args.warmup < 1: + print('XPU device requires warmup - setting warmup=1') + args.warmup = 1 + + inputs_channels = data[ + 'paper'].num_features if dataset_name == 'ogbn-mag' \ + else dataset.num_features + + for model_name in args.models: + if model_name not in supported_sets[dataset_name]: + print(f'Configuration of {dataset_name} + {model_name} ' + f'not supported. 
Skipping.') + continue + with_loader = not args.full_batch or (model_name == 'pna' + and degree is None) + print(f'Evaluation bench for {model_name}:') + + for batch_size in args.eval_batch_sizes: + num_nodes = data[ + 'paper'].num_nodes if hetero else data.num_nodes + sampler = torch.utils.data.RandomSampler( + range(num_nodes), num_samples=args.num_steps * batch_size + ) if args.num_steps != -1 and with_loader else None + kwargs = { + 'batch_size': batch_size, + 'shuffle': False, + 'num_workers': args.num_workers, + } + if not hetero: + subgraph_loader = NeighborLoader( + data, + num_neighbors=[-1], # layer-wise inference + input_nodes=mask, + sampler=sampler, + **kwargs, + ) if with_loader else None + if args.evaluate and not args.full_batch: + test_loader = NeighborLoader( + data, + num_neighbors=[-1], # layer-wise inference + input_nodes=test_mask, + sampler=None, + **kwargs, + ) + + for layers in args.num_layers: + num_neighbors = [args.hetero_num_neighbors] * layers + if hetero: + # batch-wise inference + subgraph_loader = NeighborLoader( + data, + num_neighbors=num_neighbors, + input_nodes=mask, + sampler=sampler, + **kwargs, + ) if with_loader else None + if args.evaluate and not args.full_batch: + test_loader = NeighborLoader( + data, + num_neighbors=num_neighbors, + input_nodes=test_mask, + sampler=None, + **kwargs, + ) + + for hidden_channels in args.num_hidden_channels: + print('----------------------------------------------') + print(f'Batch size={batch_size}, ' + f'Layers amount={layers}, ' + f'Num_neighbors={num_neighbors}, ' + f'Hidden features size={hidden_channels}, ' + f'Sparse tensor={args.use_sparse_tensor}') + params = { + 'inputs_channels': inputs_channels, + 'hidden_channels': hidden_channels, + 'output_channels': num_classes, + 'num_heads': args.num_heads, + 'num_layers': layers, + } + + if model_name == 'pna': + if degree is None: + degree = PNAConv.get_degree_histogram( + subgraph_loader) + print(f'Calculated degree for {dataset_name}.') + params['degree'] = degree + + model = get_model( + model_name, params, + metadata=data.metadata() if hetero else None) + model = model.to(device) + # TODO: Migrate to ModelHubMixin. 
+ if args.ckpt_path: + state_dict = torch.load(args.ckpt_path) + model.load_state_dict(state_dict) + model.eval() + if args.device == 'xpu': + model = ipex.optimize(model) + + # Define context manager parameters: + if args.cpu_affinity and with_loader: + cpu_affinity = subgraph_loader.enable_cpu_affinity( + args.loader_cores) + else: + cpu_affinity = nullcontext() + if args.profile and args.device == 'xpu': + profile = xpu_profile(args.export_chrome_trace) + elif args.profile: + profile = torch_profile(args.export_chrome_trace, + csv_data, args.write_csv) + else: + profile = nullcontext() + itt = emit_itt( + ) if args.vtune_profile else nullcontext() + + if args.full_batch and args.use_sparse_tensor: + data = transformation(data) + + with cpu_affinity, amp, timeit() as time: + inference_kwargs = dict(cache=args.cached_loader) + if args.reuse_device_for_embeddings and not hetero: + inference_kwargs['embedding_device'] = device + for _ in range(args.warmup): + if args.full_batch: + full_batch_inference(model, data) + else: + model.inference( + subgraph_loader, + device, + progress_bar=True, + **inference_kwargs, + ) + if args.warmup > 0: + time.reset() + with itt, profile: + if args.full_batch: + y = full_batch_inference(model, data) + if args.evaluate: + mask = data.test_mask + pred = y[mask].argmax(1) + test_acc = pred.eq(data.y[mask]).sum( + ).item() / mask.sum().item() + print(f'Full Batch Test Accuracy: \ + {test_acc:.4f}') + else: + y = model.inference( + subgraph_loader, + device, + progress_bar=True, + **inference_kwargs, + ) + if args.evaluate: + test_acc = test( + model, + test_loader, + device, + hetero, + progress_bar=True, + ) + print(f'Mini Batch Test Accuracy: \ + {test_acc:.4f}') + + if args.profile and args.export_chrome_trace: + rename_profile_file(model_name, dataset_name, + str(batch_size), str(layers), + str(hidden_channels), + str(num_neighbors)) + total_time = time.duration + if args.num_steps != -1: + total_num_samples = args.num_steps * batch_size + else: + total_num_samples = num_nodes + throughput = total_num_samples / total_time + latency = total_time / total_num_samples * 1000 + print(f'Throughput: {throughput:.3f} samples/s') + print(f'Latency: {latency:.3f} ms') + + num_records = 1 + if args.write_csv == 'prof': + # For profiling with PyTorch, we save the top-5 + # most time consuming operations. Therefore, the + # same data should be entered for each of them. 
+ num_records = 5 + for _ in range(num_records): + save_benchmark_data( + csv_data, + batch_size, + layers, + num_neighbors, + hidden_channels, + total_time, + model_name, + dataset_name, + args.use_sparse_tensor, + ) + if args.write_csv: + write_to_csv(csv_data, args.write_csv) + + +if __name__ == '__main__': + argparser = argparse.ArgumentParser('GNN inference benchmark') + add = argparser.add_argument + + add('--device', choices=['cpu', 'cuda', 'xpu'], default='cpu', + help='Device to run benchmark on') + add('--reuse-device-for-embeddings', action='store_true', + help='Use the same device for embeddings as specified in "--device"') + add('--datasets', nargs='+', + default=['ogbn-mag', 'ogbn-products', 'Reddit'], type=str) + add('--use-sparse-tensor', action='store_true', + help='use torch_sparse.SparseTensor as graph storage format') + add('--models', nargs='+', + default=['edge_cnn', 'gat', 'gcn', 'pna', 'rgat', 'rgcn'], type=str) + add('--root', default='../../data', type=str, + help='relative path to look for the datasets') + add('--eval-batch-sizes', nargs='+', default=[512, 1024, 2048, 4096, 8192], + type=int) + add('--num-layers', nargs='+', default=[2, 3], type=int) + add('--num-hidden-channels', nargs='+', default=[64, 128, 256], type=int) + add('--num-heads', default=2, type=int, + help='number of hidden attention heads, applies only for gat and rgat') + add('--hetero-num-neighbors', default=10, type=int, + help='number of neighbors to sample per layer for hetero workloads') + add('--num-workers', default=0, type=int) + add('--num-steps', default=-1, type=int, + help='number of steps, -1 means iterating through all the data') + add('--warmup', default=1, type=int) + add('--profile', action='store_true') + add('--vtune-profile', action='store_true') + add('--bf16', action='store_true') + add('--cpu-affinity', action='store_true', + help='Use DataLoader affinitzation.') + add('--loader-cores', nargs='+', default=[], type=int, + help="List of CPU core IDs to use for DataLoader workers") + add('--measure-load-time', action='store_true') + add('--full-batch', action='store_true', help='Use full batch mode') + add('--evaluate', action='store_true') + add('--ckpt_path', type=str, help='Checkpoint path for loading a model') + add('--write-csv', choices=[None, 'bench', 'prof'], default=None, + help='Write benchmark or PyTorch profile data to CSV') + add('--export-chrome-trace', default=True, type=bool, + help='Export chrome trace file. 
Works only with PyTorch profiler') + add('--cached-loader', action='store_true', help='Use CachedLoader') + run(argparser.parse_args()) diff --git a/pytorch_geometric-2.3.1/benchmark/kernel/README.md b/pytorch_geometric-2.4.0/benchmark/kernel/README.md similarity index 100% rename from pytorch_geometric-2.3.1/benchmark/kernel/README.md rename to pytorch_geometric-2.4.0/benchmark/kernel/README.md diff --git a/pytorch_geometric-2.3.1/benchmark/kernel/__init__.py b/pytorch_geometric-2.4.0/benchmark/kernel/__init__.py similarity index 100% rename from pytorch_geometric-2.3.1/benchmark/kernel/__init__.py rename to pytorch_geometric-2.4.0/benchmark/kernel/__init__.py diff --git a/pytorch_geometric-2.3.1/benchmark/kernel/asap.py b/pytorch_geometric-2.4.0/benchmark/kernel/asap.py similarity index 100% rename from pytorch_geometric-2.3.1/benchmark/kernel/asap.py rename to pytorch_geometric-2.4.0/benchmark/kernel/asap.py diff --git a/pytorch_geometric-2.3.1/benchmark/kernel/datasets.py b/pytorch_geometric-2.4.0/benchmark/kernel/datasets.py similarity index 98% rename from pytorch_geometric-2.3.1/benchmark/kernel/datasets.py rename to pytorch_geometric-2.4.0/benchmark/kernel/datasets.py index e7accdb..0033db2 100644 --- a/pytorch_geometric-2.3.1/benchmark/kernel/datasets.py +++ b/pytorch_geometric-2.4.0/benchmark/kernel/datasets.py @@ -7,7 +7,7 @@ from torch_geometric.utils import degree -class NormalizedDegree(object): +class NormalizedDegree: def __init__(self, mean, std): self.mean = mean self.std = std diff --git a/pytorch_geometric-2.3.1/benchmark/kernel/diff_pool.py b/pytorch_geometric-2.4.0/benchmark/kernel/diff_pool.py similarity index 100% rename from pytorch_geometric-2.3.1/benchmark/kernel/diff_pool.py rename to pytorch_geometric-2.4.0/benchmark/kernel/diff_pool.py diff --git a/pytorch_geometric-2.3.1/benchmark/kernel/edge_pool.py b/pytorch_geometric-2.4.0/benchmark/kernel/edge_pool.py similarity index 100% rename from pytorch_geometric-2.3.1/benchmark/kernel/edge_pool.py rename to pytorch_geometric-2.4.0/benchmark/kernel/edge_pool.py diff --git a/pytorch_geometric-2.3.1/benchmark/kernel/gcn.py b/pytorch_geometric-2.4.0/benchmark/kernel/gcn.py similarity index 100% rename from pytorch_geometric-2.3.1/benchmark/kernel/gcn.py rename to pytorch_geometric-2.4.0/benchmark/kernel/gcn.py diff --git a/pytorch_geometric-2.4.0/benchmark/kernel/gin.py b/pytorch_geometric-2.4.0/benchmark/kernel/gin.py new file mode 100644 index 0000000..4ee3492 --- /dev/null +++ b/pytorch_geometric-2.4.0/benchmark/kernel/gin.py @@ -0,0 +1,218 @@ +import torch +import torch.nn.functional as F +from torch.nn import BatchNorm1d as BN +from torch.nn import Linear, ReLU, Sequential + +from torch_geometric.nn import GINConv, JumpingKnowledge, global_mean_pool + + +class GIN0(torch.nn.Module): + def __init__(self, dataset, num_layers, hidden): + super().__init__() + self.conv1 = GINConv( + Sequential( + Linear(dataset.num_features, hidden), + ReLU(), + BN(hidden), + Linear(hidden, hidden), + ReLU(), + BN(hidden), + ), train_eps=False) + self.convs = torch.nn.ModuleList() + for i in range(num_layers - 1): + self.convs.append( + GINConv( + Sequential( + Linear(hidden, hidden), + ReLU(), + BN(hidden), + Linear(hidden, hidden), + ReLU(), + BN(hidden), + ), train_eps=False)) + self.lin1 = Linear(hidden, hidden) + self.lin2 = Linear(hidden, dataset.num_classes) + + def reset_parameters(self): + self.conv1.reset_parameters() + for conv in self.convs: + conv.reset_parameters() + self.lin1.reset_parameters() + 
self.lin2.reset_parameters() + + def forward(self, data): + x, edge_index, batch = data.x, data.edge_index, data.batch + x = self.conv1(x, edge_index) + for conv in self.convs: + x = conv(x, edge_index) + x = global_mean_pool(x, batch) + x = F.relu(self.lin1(x)) + x = F.dropout(x, p=0.5, training=self.training) + x = self.lin2(x) + return F.log_softmax(x, dim=-1) + + def __repr__(self): + return self.__class__.__name__ + + +class GIN0WithJK(torch.nn.Module): + def __init__(self, dataset, num_layers, hidden, mode='cat'): + super().__init__() + self.conv1 = GINConv( + Sequential( + Linear(dataset.num_features, hidden), + ReLU(), + BN(hidden), + Linear(hidden, hidden), + ReLU(), + BN(hidden), + ), train_eps=False) + self.convs = torch.nn.ModuleList() + for i in range(num_layers - 1): + self.convs.append( + GINConv( + Sequential( + Linear(hidden, hidden), + ReLU(), + BN(hidden), + Linear(hidden, hidden), + ReLU(), + BN(hidden), + ), train_eps=False)) + self.jump = JumpingKnowledge(mode) + if mode == 'cat': + self.lin1 = Linear(num_layers * hidden, hidden) + else: + self.lin1 = Linear(hidden, hidden) + self.lin2 = Linear(hidden, dataset.num_classes) + + def reset_parameters(self): + self.conv1.reset_parameters() + for conv in self.convs: + conv.reset_parameters() + self.jump.reset_parameters() + self.lin1.reset_parameters() + self.lin2.reset_parameters() + + def forward(self, data): + x, edge_index, batch = data.x, data.edge_index, data.batch + x = self.conv1(x, edge_index) + xs = [x] + for conv in self.convs: + x = conv(x, edge_index) + xs += [x] + x = self.jump(xs) + x = global_mean_pool(x, batch) + x = F.relu(self.lin1(x)) + x = F.dropout(x, p=0.5, training=self.training) + x = self.lin2(x) + return F.log_softmax(x, dim=-1) + + def __repr__(self): + return self.__class__.__name__ + + +class GIN(torch.nn.Module): + def __init__(self, dataset, num_layers, hidden): + super().__init__() + self.conv1 = GINConv( + Sequential( + Linear(dataset.num_features, hidden), + ReLU(), + BN(hidden), + Linear(hidden, hidden), + ReLU(), + BN(hidden), + ), train_eps=True) + self.convs = torch.nn.ModuleList() + for i in range(num_layers - 1): + self.convs.append( + GINConv( + Sequential( + Linear(hidden, hidden), + ReLU(), + BN(hidden), + Linear(hidden, hidden), + ReLU(), + BN(hidden), + ), train_eps=True)) + self.lin1 = Linear(hidden, hidden) + self.lin2 = Linear(hidden, dataset.num_classes) + + def reset_parameters(self): + self.conv1.reset_parameters() + for conv in self.convs: + conv.reset_parameters() + self.lin1.reset_parameters() + self.lin2.reset_parameters() + + def forward(self, data): + x, edge_index, batch = data.x, data.edge_index, data.batch + x = self.conv1(x, edge_index) + for conv in self.convs: + x = conv(x, edge_index) + x = global_mean_pool(x, batch) + x = F.relu(self.lin1(x)) + x = F.dropout(x, p=0.5, training=self.training) + x = self.lin2(x) + return F.log_softmax(x, dim=-1) + + def __repr__(self): + return self.__class__.__name__ + + +class GINWithJK(torch.nn.Module): + def __init__(self, dataset, num_layers, hidden, mode='cat'): + super().__init__() + self.conv1 = GINConv( + Sequential( + Linear(dataset.num_features, hidden), + ReLU(), + BN(hidden), + Linear(hidden, hidden), + ReLU(), + BN(hidden), + ), train_eps=True) + self.convs = torch.nn.ModuleList() + for i in range(num_layers - 1): + self.convs.append( + GINConv( + Sequential( + Linear(hidden, hidden), + ReLU(), + BN(hidden), + Linear(hidden, hidden), + ReLU(), + BN(hidden), + ), train_eps=True)) + self.jump = 
JumpingKnowledge(mode) + if mode == 'cat': + self.lin1 = Linear(num_layers * hidden, hidden) + else: + self.lin1 = Linear(hidden, hidden) + self.lin2 = Linear(hidden, dataset.num_classes) + + def reset_parameters(self): + self.conv1.reset_parameters() + for conv in self.convs: + conv.reset_parameters() + self.jump.reset_parameters() + self.lin1.reset_parameters() + self.lin2.reset_parameters() + + def forward(self, data): + x, edge_index, batch = data.x, data.edge_index, data.batch + x = self.conv1(x, edge_index) + xs = [x] + for conv in self.convs: + x = conv(x, edge_index) + xs += [x] + x = self.jump(xs) + x = global_mean_pool(x, batch) + x = F.relu(self.lin1(x)) + x = F.dropout(x, p=0.5, training=self.training) + x = self.lin2(x) + return F.log_softmax(x, dim=-1) + + def __repr__(self): + return self.__class__.__name__ diff --git a/pytorch_geometric-2.3.1/benchmark/kernel/global_attention.py b/pytorch_geometric-2.4.0/benchmark/kernel/global_attention.py similarity index 100% rename from pytorch_geometric-2.3.1/benchmark/kernel/global_attention.py rename to pytorch_geometric-2.4.0/benchmark/kernel/global_attention.py diff --git a/pytorch_geometric-2.3.1/benchmark/kernel/graclus.py b/pytorch_geometric-2.4.0/benchmark/kernel/graclus.py similarity index 100% rename from pytorch_geometric-2.3.1/benchmark/kernel/graclus.py rename to pytorch_geometric-2.4.0/benchmark/kernel/graclus.py diff --git a/pytorch_geometric-2.3.1/benchmark/kernel/graph_sage.py b/pytorch_geometric-2.4.0/benchmark/kernel/graph_sage.py similarity index 100% rename from pytorch_geometric-2.3.1/benchmark/kernel/graph_sage.py rename to pytorch_geometric-2.4.0/benchmark/kernel/graph_sage.py diff --git a/pytorch_geometric-2.3.1/benchmark/kernel/main.py b/pytorch_geometric-2.4.0/benchmark/kernel/main.py similarity index 100% rename from pytorch_geometric-2.3.1/benchmark/kernel/main.py rename to pytorch_geometric-2.4.0/benchmark/kernel/main.py diff --git a/pytorch_geometric-2.3.1/benchmark/kernel/main_performance.py b/pytorch_geometric-2.4.0/benchmark/kernel/main_performance.py similarity index 91% rename from pytorch_geometric-2.3.1/benchmark/kernel/main_performance.py rename to pytorch_geometric-2.4.0/benchmark/kernel/main_performance.py index 0871f81..c35737d 100644 --- a/pytorch_geometric-2.3.1/benchmark/kernel/main_performance.py +++ b/pytorch_geometric-2.4.0/benchmark/kernel/main_performance.py @@ -8,6 +8,7 @@ from graph_sage import GraphSAGE from train_eval import eval_acc, inference_run, train +import torch_geometric from torch_geometric import seed_everything from torch_geometric.loader import DataLoader from torch_geometric.profile import rename_profile_file, timeit, torch_profile @@ -32,9 +33,16 @@ parser.add_argument('--inference', action='store_true') parser.add_argument('--profile', action='store_true') parser.add_argument('--bf16', action='store_true') +parser.add_argument('--compile', action='store_true') args = parser.parse_args() -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +if torch.cuda.is_available(): + device = torch.device('cuda') +elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + device = torch.device('mps') +else: + device = torch.device('cpu') + if torch.cuda.is_available(): amp = torch.cuda.amp.autocast(enabled=False) else: @@ -77,7 +85,8 @@ def run_train(): model = Model(dataset, num_layers, hidden).to(device) optimizer = torch.optim.Adam(model.parameters(), lr=args.lr) - + if args.compile: + model = torch_geometric.compile(model) loss_list = [] 
    acc_list = []

     for epoch in range(1, args.epochs + 1):
@@ -116,7 +125,8 @@ def run_inference():
         print(f'{dataset_name} - {model_name}- {num_layers} - {hidden}')

         model = Model(dataset, num_layers, hidden).to(device)
-
+        if args.compile:
+            model = torch_geometric.compile(model)
         with amp:
             for epoch in range(1, args.epochs + 1):
                 if epoch == args.epochs:
diff --git a/pytorch_geometric-2.3.1/benchmark/kernel/sag_pool.py b/pytorch_geometric-2.4.0/benchmark/kernel/sag_pool.py
similarity index 100%
rename from pytorch_geometric-2.3.1/benchmark/kernel/sag_pool.py
rename to pytorch_geometric-2.4.0/benchmark/kernel/sag_pool.py
diff --git a/pytorch_geometric-2.3.1/benchmark/kernel/set2set.py b/pytorch_geometric-2.4.0/benchmark/kernel/set2set.py
similarity index 100%
rename from pytorch_geometric-2.3.1/benchmark/kernel/set2set.py
rename to pytorch_geometric-2.4.0/benchmark/kernel/set2set.py
diff --git a/pytorch_geometric-2.3.1/benchmark/kernel/sort_pool.py b/pytorch_geometric-2.4.0/benchmark/kernel/sort_pool.py
similarity index 87%
rename from pytorch_geometric-2.3.1/benchmark/kernel/sort_pool.py
rename to pytorch_geometric-2.4.0/benchmark/kernel/sort_pool.py
index 1d68a92..926a816 100644
--- a/pytorch_geometric-2.3.1/benchmark/kernel/sort_pool.py
+++ b/pytorch_geometric-2.4.0/benchmark/kernel/sort_pool.py
@@ -2,19 +2,19 @@
 import torch.nn.functional as F
 from torch.nn import Conv1d, Linear

-from torch_geometric.nn import SAGEConv, global_sort_pool
+from torch_geometric.nn import SAGEConv, SortAggregation


 class SortPool(torch.nn.Module):
     def __init__(self, dataset, num_layers, hidden):
         super().__init__()
-        self.k = 30
         self.conv1 = SAGEConv(dataset.num_features, hidden)
         self.convs = torch.nn.ModuleList()
         for i in range(num_layers - 1):
             self.convs.append(SAGEConv(hidden, hidden))
+        self.pool = SortAggregation(k=30)
         self.conv1d = Conv1d(hidden, 32, 5)
-        self.lin1 = Linear(32 * (self.k - 5 + 1), hidden)
+        self.lin1 = Linear(32 * (30 - 5 + 1), hidden)
         self.lin2 = Linear(hidden, dataset.num_classes)

     def reset_parameters(self):
@@ -30,7 +30,7 @@ def forward(self, data):
         x = F.relu(self.conv1(x, edge_index))
         for conv in self.convs:
             x = F.relu(conv(x, edge_index))
-        x = global_sort_pool(x, batch, self.k)
-        x = x.view(len(x), self.k, -1).permute(0, 2, 1)
+        x = self.pool(x, batch)
+        x = x.view(len(x), 30, -1).permute(0, 2, 1)  # k = 30
         x = F.relu(self.conv1d(x))
         x = x.view(len(x), -1)
diff --git a/pytorch_geometric-2.3.1/benchmark/kernel/statistics.py b/pytorch_geometric-2.4.0/benchmark/kernel/statistics.py
similarity index 100%
rename from pytorch_geometric-2.3.1/benchmark/kernel/statistics.py
rename to pytorch_geometric-2.4.0/benchmark/kernel/statistics.py
diff --git a/pytorch_geometric-2.3.1/benchmark/kernel/top_k.py b/pytorch_geometric-2.4.0/benchmark/kernel/top_k.py
similarity index 100%
rename from pytorch_geometric-2.3.1/benchmark/kernel/top_k.py
rename to pytorch_geometric-2.4.0/benchmark/kernel/top_k.py
diff --git a/pytorch_geometric-2.4.0/benchmark/kernel/train_eval.py b/pytorch_geometric-2.4.0/benchmark/kernel/train_eval.py
new file mode 100644
index 0000000..7c268b0
--- /dev/null
+++ b/pytorch_geometric-2.4.0/benchmark/kernel/train_eval.py
@@ -0,0 +1,170 @@
+import time
+
+import torch
+import torch.nn.functional as F
+from sklearn.model_selection import StratifiedKFold
+from torch import tensor
+from torch.optim import Adam
+
+from torch_geometric.loader import DataLoader
+from torch_geometric.loader import DenseDataLoader as DenseLoader
+
+if torch.cuda.is_available():
+    device = torch.device('cuda')
+elif hasattr(torch.backends, 'mps') and 
torch.backends.mps.is_available(): + device = torch.device('mps') +else: + device = torch.device('cpu') + + +def cross_validation_with_val_set(dataset, model, folds, epochs, batch_size, + lr, lr_decay_factor, lr_decay_step_size, + weight_decay, logger=None): + + val_losses, accs, durations = [], [], [] + for fold, (train_idx, test_idx, + val_idx) in enumerate(zip(*k_fold(dataset, folds))): + + train_dataset = dataset[train_idx] + test_dataset = dataset[test_idx] + val_dataset = dataset[val_idx] + + if 'adj' in train_dataset[0]: + train_loader = DenseLoader(train_dataset, batch_size, shuffle=True) + val_loader = DenseLoader(val_dataset, batch_size, shuffle=False) + test_loader = DenseLoader(test_dataset, batch_size, shuffle=False) + else: + train_loader = DataLoader(train_dataset, batch_size, shuffle=True) + val_loader = DataLoader(val_dataset, batch_size, shuffle=False) + test_loader = DataLoader(test_dataset, batch_size, shuffle=False) + + model.to(device).reset_parameters() + optimizer = Adam(model.parameters(), lr=lr, weight_decay=weight_decay) + + if torch.cuda.is_available(): + torch.cuda.synchronize() + elif hasattr(torch.backends, + 'mps') and torch.backends.mps.is_available(): + try: + import torch.mps + torch.mps.synchronize() + except ImportError: + pass + + t_start = time.perf_counter() + + for epoch in range(1, epochs + 1): + train_loss = train(model, optimizer, train_loader) + val_losses.append(eval_loss(model, val_loader)) + accs.append(eval_acc(model, test_loader)) + eval_info = { + 'fold': fold, + 'epoch': epoch, + 'train_loss': train_loss, + 'val_loss': val_losses[-1], + 'test_acc': accs[-1], + } + + if logger is not None: + logger(eval_info) + + if epoch % lr_decay_step_size == 0: + for param_group in optimizer.param_groups: + param_group['lr'] = lr_decay_factor * param_group['lr'] + + if torch.cuda.is_available(): + torch.cuda.synchronize() + elif hasattr(torch.backends, + 'mps') and torch.backends.mps.is_available(): + torch.mps.synchronize() + + t_end = time.perf_counter() + durations.append(t_end - t_start) + + loss, acc, duration = tensor(val_losses), tensor(accs), tensor(durations) + loss, acc = loss.view(folds, epochs), acc.view(folds, epochs) + loss, argmin = loss.min(dim=1) + acc = acc[torch.arange(folds, dtype=torch.long), argmin] + + loss_mean = loss.mean().item() + acc_mean = acc.mean().item() + acc_std = acc.std().item() + duration_mean = duration.mean().item() + print(f'Val Loss: {loss_mean:.4f}, Test Accuracy: {acc_mean:.3f} ' + f'± {acc_std:.3f}, Duration: {duration_mean:.3f}') + + return loss_mean, acc_mean, acc_std + + +def k_fold(dataset, folds): + skf = StratifiedKFold(folds, shuffle=True, random_state=12345) + + test_indices, train_indices = [], [] + for _, idx in skf.split(torch.zeros(len(dataset)), dataset.data.y): + test_indices.append(torch.from_numpy(idx).to(torch.long)) + + val_indices = [test_indices[i - 1] for i in range(folds)] + + for i in range(folds): + train_mask = torch.ones(len(dataset), dtype=torch.bool) + train_mask[test_indices[i]] = 0 + train_mask[val_indices[i]] = 0 + train_indices.append(train_mask.nonzero(as_tuple=False).view(-1)) + + return train_indices, test_indices, val_indices + + +def num_graphs(data): + if hasattr(data, 'num_graphs'): + return data.num_graphs + else: + return data.x.size(0) + + +def train(model, optimizer, loader): + model.train() + + total_loss = 0 + for data in loader: + optimizer.zero_grad() + data = data.to(device) + out = model(data) + loss = F.nll_loss(out, data.y.view(-1)) + loss.backward() + 
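The `k_fold` helper defined above rotates folds rather than drawing three independent splits: test folds come from `StratifiedKFold`, the validation fold of split `i` reuses test fold `i - 1`, and training keeps the rest. A self-contained sketch of that rotation, assuming labels are available as a tensor `y`:

```
import torch
from sklearn.model_selection import StratifiedKFold


def k_fold_indices(num_samples, y, folds=10):
    skf = StratifiedKFold(folds, shuffle=True, random_state=12345)
    # Stratified test folds, one per split.
    test = [torch.from_numpy(idx).to(torch.long)
            for _, idx in skf.split(torch.zeros(num_samples), y)]
    # Validation fold i is simply test fold i - 1 (wrapping around).
    val = [test[i - 1] for i in range(folds)]
    train = []
    for i in range(folds):
        mask = torch.ones(num_samples, dtype=torch.bool)
        mask[test[i]] = False
        mask[val[i]] = False
        train.append(mask.nonzero(as_tuple=False).view(-1))
    return train, test, val
```

Every sample therefore appears exactly once as test data and once as validation data across the folds.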
total_loss += loss.item() * num_graphs(data) + optimizer.step() + return total_loss / len(loader.dataset) + + +def eval_acc(model, loader): + model.eval() + + correct = 0 + for data in loader: + data = data.to(device) + with torch.no_grad(): + pred = model(data).max(1)[1] + correct += pred.eq(data.y.view(-1)).sum().item() + return correct / len(loader.dataset) + + +def eval_loss(model, loader): + model.eval() + + loss = 0 + for data in loader: + data = data.to(device) + with torch.no_grad(): + out = model(data) + loss += F.nll_loss(out, data.y.view(-1), reduction='sum').item() + return loss / len(loader.dataset) + + +@torch.no_grad() +def inference_run(model, loader, bf16): + model.eval() + for data in loader: + data = data.to(device) + if bf16: + data.x = data.x.to(torch.bfloat16) + model(data) diff --git a/pytorch_geometric-2.4.0/benchmark/loader/neighbor_loader.py b/pytorch_geometric-2.4.0/benchmark/loader/neighbor_loader.py new file mode 100644 index 0000000..1732522 --- /dev/null +++ b/pytorch_geometric-2.4.0/benchmark/loader/neighbor_loader.py @@ -0,0 +1,135 @@ +import argparse +import ast +import os.path as osp +from contextlib import nullcontext +from timeit import default_timer + +import tqdm +from ogb.nodeproppred import PygNodePropPredDataset + +import torch_geometric.transforms as T +from torch_geometric.datasets import OGB_MAG +from torch_geometric.loader import NeighborLoader +from torch_geometric.profile import torch_profile + + +def run(args: argparse.ArgumentParser): + for dataset_name in args.datasets: + print(f"Dataset: {dataset_name}") + root = osp.join(args.root, dataset_name) + transform = T.ToSparseTensor( + remove_edge_index=False) if args.use_sparse_tensor else None + if dataset_name == 'mag': + transform = (T.ToUndirected(merge=True) if transform is None else + T.Compose([T.ToUndirected(merge=True), transform])) + dataset = OGB_MAG(root=root, transform=transform) + train_idx = ('paper', dataset[0]['paper'].train_mask) + eval_idx = ('paper', None) + neighbor_sizes = (args.hetero_neighbor_sizes + if args.hetero_neighbor_sizes else None) + else: + dataset = PygNodePropPredDataset(f'ogbn-{dataset_name}', root) + split_idx = dataset.get_idx_split() + train_idx = split_idx['train'] + eval_idx = None + neighbor_sizes = (args.homo_neighbor_sizes + if args.homo_neighbor_sizes else None) + + data = dataset[0].to(args.device) + average_times = [] + profile = torch_profile() if args.profile else nullcontext() + # run dataloader iteration + if neighbor_sizes is not None: + for num_neighbors in neighbor_sizes: + print(f'Training sampling with {num_neighbors} neighbors') + for batch_size in args.batch_sizes: + train_loader = NeighborLoader( + data, + num_neighbors=num_neighbors, + input_nodes=train_idx, + batch_size=batch_size, + shuffle=True, + num_workers=args.num_workers, + subgraph_type=args.subgraph_type, + ) + cpu_affinity = train_loader.enable_cpu_affinity( + args.loader_cores + ) if args.cpu_affinity else nullcontext() + runtimes = [] + num_iterations = 0 + with profile, cpu_affinity: + for runit in range(args.runs): + start = default_timer() + for batch in tqdm.tqdm(train_loader): + num_iterations += 1 + stop = default_timer() + runtimes.append(round(stop - start, 3)) + average_time = round(sum(runtimes) / args.runs, 3) + print(f'batch size={batch_size}, ' + f'iterations={num_iterations}, ' + f'runtimes={runtimes}, ' + f'average runtime={average_time}') + average_times.append(average_time) + eval_batch_sizes = (args.eval_batch_sizes + if args.eval_batch_sizes else None) 
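Stripped of its timing scaffolding and argument plumbing, the core pattern the new `neighbor_loader.py` benchmark measures is just iterating a `NeighborLoader` with a given fan-out and batch size. A rough standalone sketch (the Cora dataset is used here only for illustration; the benchmark itself runs on the OGB datasets):

```
from torch_geometric.datasets import Planetoid
from torch_geometric.loader import NeighborLoader

dataset = Planetoid(root='data/Cora', name='Cora')
data = dataset[0]

loader = NeighborLoader(
    data,
    num_neighbors=[15, 10, 5],    # fan-out per hop, as in the homo presets above
    input_nodes=data.train_mask,  # seed nodes to sample around
    batch_size=1024,
    shuffle=True,
)

for batch in loader:
    # Each mini-batch is a sampled subgraph whose first
    # `batch.batch_size` nodes are the seed nodes.
    assert batch.num_nodes >= batch.batch_size
```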
+ if eval_batch_sizes is not None: + print('Evaluation sampling with all neighbors') + for batch_size in eval_batch_sizes: + subgraph_loader = NeighborLoader( + data, + num_neighbors=[-1], + input_nodes=eval_idx, + batch_size=batch_size, + shuffle=False, + num_workers=args.num_workers, + ) + cpu_affinity = subgraph_loader.enable_cpu_affinity( + args.loader_cores) if args.cpu_affinity else nullcontext() + runtimes = [] + num_iterations = 0 + with profile, cpu_affinity: + for runit in range(args.runs): + start = default_timer() + for batch in tqdm.tqdm(subgraph_loader): + num_iterations += 1 + stop = default_timer() + runtimes.append(round(stop - start, 3)) + average_time = round(sum(runtimes) / args.runs, 3) + print(f'batch size={batch_size}, ' + f'iterations={num_iterations}, ' + f'runtimes={runtimes}, ' + f'average runtime={average_time}') + average_times.append(average_time) + print(f"Total time averages: {average_times}") + + +if __name__ == '__main__': + parser = argparse.ArgumentParser('NeighborLoader Sampling Benchmarking') + add = parser.add_argument + + add('--device', default='cpu') + add('--datasets', nargs="+", default=['arxiv', 'products', 'mag']) + add('--root', default='../../data') + add('--batch-sizes', default=[8192, 4096, 2048, 1024, 512], + type=ast.literal_eval) + add('--eval-batch-sizes', default=[16384, 8192, 4096, 2048, 1024, 512], + type=ast.literal_eval) + add('--homo-neighbor_sizes', default=[[10, 5], [15, 10, 5], [20, 15, 10]], + type=ast.literal_eval) + add('--hetero-neighbor_sizes', default=[[5], [10], [10, 5]], + type=ast.literal_eval) + add('--use-sparse-tensor', action='store_true', + help='use torch_sparse.SparseTensor as graph storage format') + add('--num-workers', type=int, default=0, + help="Number of DataLoader workers to use.") + add('--runs', type=int, default=3, + help="Number of iterations for each test setting.") + add('--profile', default=False, action='store_true', + help="Run torch.profiler.") + add('--cpu-affinity', default=False, action='store_true', + help="Use DataLoader affinitzation.") + add('--loader-cores', nargs='+', default=[], type=int, + help="List of CPU core IDs to use for DataLoader workers.") + add('--subgraph-type', type=str, default='directional', + help="The type of the returned subgraph (directional, bidirectional)") + run(parser.parse_args()) diff --git a/pytorch_geometric-2.3.1/benchmark/points/README.md b/pytorch_geometric-2.4.0/benchmark/points/README.md similarity index 100% rename from pytorch_geometric-2.3.1/benchmark/points/README.md rename to pytorch_geometric-2.4.0/benchmark/points/README.md diff --git a/pytorch_geometric-2.3.1/benchmark/points/__init__.py b/pytorch_geometric-2.4.0/benchmark/points/__init__.py similarity index 100% rename from pytorch_geometric-2.3.1/benchmark/points/__init__.py rename to pytorch_geometric-2.4.0/benchmark/points/__init__.py diff --git a/pytorch_geometric-2.3.1/benchmark/points/datasets.py b/pytorch_geometric-2.4.0/benchmark/points/datasets.py similarity index 100% rename from pytorch_geometric-2.3.1/benchmark/points/datasets.py rename to pytorch_geometric-2.4.0/benchmark/points/datasets.py diff --git a/pytorch_geometric-2.3.1/benchmark/points/edge_cnn.py b/pytorch_geometric-2.4.0/benchmark/points/edge_cnn.py similarity index 95% rename from pytorch_geometric-2.3.1/benchmark/points/edge_cnn.py rename to pytorch_geometric-2.4.0/benchmark/points/edge_cnn.py index 0a0bcbf..d2017ca 100644 --- a/pytorch_geometric-2.3.1/benchmark/points/edge_cnn.py +++ 
b/pytorch_geometric-2.4.0/benchmark/points/edge_cnn.py @@ -21,6 +21,7 @@ parser.add_argument('--inference', action='store_true') parser.add_argument('--profile', action='store_true') parser.add_argument('--bf16', action='store_true') +parser.add_argument('--compile', action='store_true') args = parser.parse_args() @@ -60,7 +61,7 @@ def forward(self, pos, batch): model = Net(train_dataset.num_classes) run(train_dataset, test_dataset, model, args.epochs, args.batch_size, args.lr, args.lr_decay_factor, args.lr_decay_step_size, args.weight_decay, - args.inference, args.profile, args.bf16) + args.inference, args.profile, args.bf16, args.compile) if args.profile: rename_profile_file('points', DynamicEdgeConv.__name__) diff --git a/pytorch_geometric-2.3.1/benchmark/points/mpnn.py b/pytorch_geometric-2.4.0/benchmark/points/mpnn.py similarity index 96% rename from pytorch_geometric-2.3.1/benchmark/points/mpnn.py rename to pytorch_geometric-2.4.0/benchmark/points/mpnn.py index 68c66c6..3588433 100644 --- a/pytorch_geometric-2.3.1/benchmark/points/mpnn.py +++ b/pytorch_geometric-2.4.0/benchmark/points/mpnn.py @@ -21,6 +21,7 @@ parser.add_argument('--inference', action='store_true') parser.add_argument('--profile', action='store_true') parser.add_argument('--bf16', action='store_true') +parser.add_argument('--compile', action='store_true') args = parser.parse_args() @@ -77,7 +78,7 @@ def forward(self, pos, batch): model = Net(train_dataset.num_classes) run(train_dataset, test_dataset, model, args.epochs, args.batch_size, args.lr, args.lr_decay_factor, args.lr_decay_step_size, args.weight_decay, - args.inference, args.profile, args.bf16) + args.inference, args.profile, args.bf16, args.compile) if args.profile: rename_profile_file('points', NNConv.__name__) diff --git a/pytorch_geometric-2.3.1/benchmark/points/point_cnn.py b/pytorch_geometric-2.4.0/benchmark/points/point_cnn.py similarity index 95% rename from pytorch_geometric-2.3.1/benchmark/points/point_cnn.py rename to pytorch_geometric-2.4.0/benchmark/points/point_cnn.py index 2b804e4..e8b4a01 100644 --- a/pytorch_geometric-2.3.1/benchmark/points/point_cnn.py +++ b/pytorch_geometric-2.4.0/benchmark/points/point_cnn.py @@ -19,6 +19,7 @@ parser.add_argument('--inference', action='store_true') parser.add_argument('--profile', action='store_true') parser.add_argument('--bf16', action='store_true') +parser.add_argument('--compile', action='store_true') args = parser.parse_args() @@ -65,7 +66,7 @@ def forward(self, pos, batch): model = Net(train_dataset.num_classes) run(train_dataset, test_dataset, model, args.epochs, args.batch_size, args.lr, args.lr_decay_factor, args.lr_decay_step_size, args.weight_decay, - args.inference, args.profile, args.bf16) + args.inference, args.profile, args.bf16, args.compile) if args.profile: rename_profile_file('points', XConv.__name__) diff --git a/pytorch_geometric-2.3.1/benchmark/points/point_net.py b/pytorch_geometric-2.4.0/benchmark/points/point_net.py similarity index 95% rename from pytorch_geometric-2.3.1/benchmark/points/point_net.py rename to pytorch_geometric-2.4.0/benchmark/points/point_net.py index 56a19b5..fe1edf0 100644 --- a/pytorch_geometric-2.3.1/benchmark/points/point_net.py +++ b/pytorch_geometric-2.4.0/benchmark/points/point_net.py @@ -21,6 +21,7 @@ parser.add_argument('--inference', action='store_true') parser.add_argument('--profile', action='store_true') parser.add_argument('--bf16', action='store_true') +parser.add_argument('--compile', action='store_true') args = parser.parse_args() @@ -73,7 +74,7 
@@ def forward(self, pos, batch): model = Net(train_dataset.num_classes) run(train_dataset, test_dataset, model, args.epochs, args.batch_size, args.lr, args.lr_decay_factor, args.lr_decay_step_size, args.weight_decay, - args.inference, args.profile, args.bf16) + args.inference, args.profile, args.bf16, args.compile) if args.profile: rename_profile_file('points', PointNetConv.__name__) diff --git a/pytorch_geometric-2.3.1/benchmark/points/spline_cnn.py b/pytorch_geometric-2.4.0/benchmark/points/spline_cnn.py similarity index 96% rename from pytorch_geometric-2.3.1/benchmark/points/spline_cnn.py rename to pytorch_geometric-2.4.0/benchmark/points/spline_cnn.py index 5ab5b01..b258639 100644 --- a/pytorch_geometric-2.3.1/benchmark/points/spline_cnn.py +++ b/pytorch_geometric-2.4.0/benchmark/points/spline_cnn.py @@ -19,6 +19,7 @@ parser.add_argument('--inference', action='store_true') parser.add_argument('--profile', action='store_true') parser.add_argument('--bf16', action='store_true') +parser.add_argument('--compile', action='store_true') args = parser.parse_args() @@ -74,7 +75,7 @@ def forward(self, pos, batch): model = Net(train_dataset.num_classes) run(train_dataset, test_dataset, model, args.epochs, args.batch_size, args.lr, args.lr_decay_factor, args.lr_decay_step_size, args.weight_decay, - args.inference, args.profile, args.bf16) + args.inference, args.profile, args.bf16, args.compile) if args.profile: rename_profile_file('points', SplineConv.__name__) diff --git a/pytorch_geometric-2.3.1/benchmark/points/statistics.py b/pytorch_geometric-2.4.0/benchmark/points/statistics.py similarity index 100% rename from pytorch_geometric-2.3.1/benchmark/points/statistics.py rename to pytorch_geometric-2.4.0/benchmark/points/statistics.py diff --git a/pytorch_geometric-2.4.0/benchmark/points/train_eval.py b/pytorch_geometric-2.4.0/benchmark/points/train_eval.py new file mode 100644 index 0000000..7912ff4 --- /dev/null +++ b/pytorch_geometric-2.4.0/benchmark/points/train_eval.py @@ -0,0 +1,133 @@ +import time + +import torch +import torch.nn.functional as F +from torch.optim import Adam + +import torch_geometric +from torch_geometric.loader import DataLoader +from torch_geometric.profile import timeit, torch_profile + +if torch.cuda.is_available(): + device = torch.device('cuda') +elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + device = torch.device('mps') +else: + device = torch.device('cpu') + + +def run_train(train_dataset, test_dataset, model, epochs, batch_size, + use_compile, lr, lr_decay_factor, lr_decay_step_size, + weight_decay): + model = model.to(device) + if use_compile: + model = torch_geometric.compile(model) + optimizer = Adam(model.parameters(), lr=lr, weight_decay=weight_decay) + + train_loader = DataLoader(train_dataset, batch_size, shuffle=True) + test_loader = DataLoader(test_dataset, batch_size, shuffle=False) + + for epoch in range(1, epochs + 1): + if torch.cuda.is_available(): + torch.cuda.synchronize() + elif (hasattr(torch.backends, 'mps') + and torch.backends.mps.is_available()): + import torch.mps + torch.mps.synchronize() + + t_start = time.perf_counter() + + train(model, optimizer, train_loader, device) + test_acc = test(model, test_loader, device) + + if torch.cuda.is_available(): + torch.cuda.synchronize() + elif (hasattr(torch.backends, 'mps') + and torch.backends.mps.is_available()): + import torch.mps + torch.mps.synchronize() + + t_end = time.perf_counter() + + print(f'Epoch: {epoch:03d}, Test: {test_acc:.4f}, ' + f'Duration: {t_end - 
t_start:.2f}') + + if epoch % lr_decay_step_size == 0: + for param_group in optimizer.param_groups: + param_group['lr'] = lr_decay_factor * param_group['lr'] + + +@torch.no_grad() +def run_inference(test_dataset, model, epochs, batch_size, profiling, bf16, + use_compile): + model = model.to(device) + if use_compile: + model = torch_geometric.compile(model) + test_loader = DataLoader(test_dataset, batch_size, shuffle=False) + + if torch.cuda.is_available(): + amp = torch.cuda.amp.autocast(enabled=False) + else: + amp = torch.cpu.amp.autocast(enabled=bf16) + + with amp: + for epoch in range(1, epochs + 1): + print("Epoch: ", epoch) + if epoch == epochs: + with timeit(): + inference(model, test_loader, device, bf16) + else: + inference(model, test_loader, device, bf16) + + if profiling: + with torch_profile(): + inference(model, test_loader, device, bf16) + + +def run(train_dataset, test_dataset, model, epochs, batch_size, lr, + lr_decay_factor, lr_decay_step_size, weight_decay, inference, + profiling, bf16, use_compile): + if not inference: + run_train(train_dataset, test_dataset, model, epochs, batch_size, + use_compile, lr, lr_decay_factor, lr_decay_step_size, + weight_decay) + else: + run_inference(test_dataset, model, epochs, batch_size, profiling, bf16, + use_compile) + + +def train(model, optimizer, train_loader, device): + model.train() + + for data in train_loader: + optimizer.zero_grad() + data = data.to(device) + out = model(data.pos, data.batch) + loss = F.nll_loss(out, data.y) + loss.backward() + optimizer.step() + + +@torch.no_grad() +def test(model, test_loader, device): + model.eval() + + correct = 0 + for data in test_loader: + data = data.to(device) + pred = model(data.pos, data.batch).max(1)[1] + correct += pred.eq(data.y).sum().item() + test_acc = correct / len(test_loader.dataset) + + return test_acc + + +@torch.no_grad() +def inference(model, test_loader, device, bf16): + model.eval() + for data in test_loader: + data = data.to(device) + if bf16: + data.pos = data.pos.to(torch.bfloat16) + model = model.to(torch.bfloat16) + model(data.pos, data.batch) diff --git a/pytorch_geometric-2.3.1/benchmark/runtime/README.md b/pytorch_geometric-2.4.0/benchmark/runtime/README.md similarity index 100% rename from pytorch_geometric-2.3.1/benchmark/runtime/README.md rename to pytorch_geometric-2.4.0/benchmark/runtime/README.md diff --git a/pytorch_geometric-2.3.1/benchmark/runtime/__init__.py b/pytorch_geometric-2.4.0/benchmark/runtime/__init__.py similarity index 100% rename from pytorch_geometric-2.3.1/benchmark/runtime/__init__.py rename to pytorch_geometric-2.4.0/benchmark/runtime/__init__.py diff --git a/pytorch_geometric-2.4.0/benchmark/runtime/dgl/gat.py b/pytorch_geometric-2.4.0/benchmark/runtime/dgl/gat.py new file mode 100644 index 0000000..1de2c4c --- /dev/null +++ b/pytorch_geometric-2.4.0/benchmark/runtime/dgl/gat.py @@ -0,0 +1,128 @@ +import dgl.function as fn +import torch +import torch.nn.functional as F +from dgl.nn.pytorch import EdgeSoftmax +from torch.nn import Parameter + +from torch_geometric.nn.inits import glorot, zeros + + +class GATConv(torch.nn.Module): + def __init__(self, g, in_channels, out_channels, heads=1, + negative_slope=0.2, dropout=0): + super().__init__() + + self.g = g + self.in_channels = in_channels + self.out_channels = out_channels + self.heads = heads + self.negative_slope = negative_slope + self.dropout = dropout + + self.weight = Parameter(torch.empty(in_channels, heads * out_channels)) + self.att = Parameter(torch.empty(1, heads, 2 * 
out_channels)) + self.bias = Parameter(torch.empty(heads * out_channels)) + self.reset_parameters() + + def reset_parameters(self): + glorot(self.weight) + glorot(self.att) + zeros(self.bias) + + def gat_msg(self, edge): + alpha = torch.cat([edge.src['x'], edge.dst['x']], dim=-1) + alpha = (alpha * self.att).sum(dim=-1) + alpha = F.leaky_relu(alpha, self.negative_slope) + return {'m': edge.src['x'], 'a': alpha} + + def gat_reduce(self, node): + alpha = torch.softmax(node.mailbox['a'], dim=1) + alpha = F.dropout(alpha, p=self.dropout, training=self.training) + x = (node.mailbox['m'] * alpha.unsqueeze(-1)).sum(dim=1) + return {'x': x} + + def forward(self, x): + x = torch.mm(x, self.weight).view(-1, self.heads, self.out_channels) + self.g.ndata['x'] = x + self.g.update_all(self.gat_msg, self.gat_reduce) + x = self.g.ndata.pop('x') + x = x.view(-1, self.heads * self.out_channels) + x = x + self.bias + return x + + +class GAT(torch.nn.Module): + def __init__(self, g, in_channels, out_channels): + super().__init__() + self.g = g + self.conv1 = GATConv(g, in_channels, 8, 8, 0.6, 0.2) + self.conv2 = GATConv(g, 64, out_channels, 1, 0.6, 0.2) + + def forward(self, x): + x = F.dropout(x, p=0.6, training=self.training) + x = F.elu(self.conv1(x)) + x = F.dropout(x, p=0.6, training=self.training) + x = self.conv2(x) + return F.log_softmax(x, dim=1) + + +class GATSPMVConv(torch.nn.Module): + def __init__(self, g, in_channels, out_channels, heads=1, + negative_slope=0.2, dropout=0): + super().__init__() + self.g = g + self.out_channels = out_channels + self.heads = heads + self.negative_slope = negative_slope + self.dropout = dropout + self.weight = Parameter(torch.empty(in_channels, heads * out_channels)) + self.att_l = Parameter(torch.empty(heads, out_channels, 1)) + self.att_r = Parameter(torch.empty(heads, out_channels, 1)) + self.bias = Parameter(torch.empty(heads * out_channels)) + self.softmax = EdgeSoftmax() + self.reset_parameters() + + def reset_parameters(self): + glorot(self.weight) + glorot(self.att_l) + glorot(self.att_r) + zeros(self.bias) + + def forward(self, x): + x = torch.matmul(x, self.weight) + x = x.reshape((x.size(0), self.heads, -1)) # NxHxD' + head_x = x.transpose(0, 1) # HxNxD' + a1 = torch.bmm(head_x, self.att_l).transpose(0, 1) # NxHx1 + a2 = torch.bmm(head_x, self.att_r).transpose(0, 1) # NxHx1 + self.g.ndata.update({'x': x, 'a1': a1, 'a2': a2}) + self.g.apply_edges(self.edge_attention) + self.edge_softmax() + self.g.update_all(fn.src_mul_edge('x', 'a', 'x'), fn.sum('x', 'x')) + x = self.g.ndata['x'] / self.g.ndata['z'] # NxHxD' + return x.view(-1, self.heads * self.out_channels) + + def edge_attention(self, edge): + a = F.leaky_relu(edge.src['a1'] + edge.dst['a2'], self.negative_slope) + return {'a': a} + + def edge_softmax(self): + alpha, normalizer = self.softmax(self.g.edata['a'], self.g) + self.g.ndata['z'] = normalizer + if self.training and self.dropout > 0: + alpha = F.dropout(alpha, p=self.dropout, training=True) + self.g.edata['a'] = alpha + + +class GATSPMV(torch.nn.Module): + def __init__(self, g, in_channels, out_channels): + super().__init__() + self.g = g + self.conv1 = GATSPMVConv(g, in_channels, 8, 8, 0.6, 0.2) + self.conv2 = GATSPMVConv(g, 64, out_channels, 1, 0.6, 0.2) + + def forward(self, x): + x = F.dropout(x, p=0.6, training=self.training) + x = F.elu(self.conv1(x)) + x = F.dropout(x, p=0.6, training=self.training) + x = self.conv2(x) + return F.log_softmax(x, dim=1) diff --git a/pytorch_geometric-2.4.0/benchmark/runtime/dgl/gcn.py 
b/pytorch_geometric-2.4.0/benchmark/runtime/dgl/gcn.py new file mode 100644 index 0000000..d92b6aa --- /dev/null +++ b/pytorch_geometric-2.4.0/benchmark/runtime/dgl/gcn.py @@ -0,0 +1,80 @@ +import dgl.function as fn +import torch +import torch.nn.functional as F +from torch.nn import Parameter + +from torch_geometric.nn.inits import glorot, zeros + + +class GCNConv(torch.nn.Module): + def __init__(self, g, in_channels, out_channels): + super().__init__() + self.g = g + self.weight = Parameter(torch.empty(in_channels, out_channels)) + self.bias = Parameter(torch.empty(out_channels)) + self.reset_parameters() + + def reset_parameters(self): + glorot(self.weight) + zeros(self.bias) + + def gcn_msg(self, edge): + return {'m': edge.src['x'] * edge.src['norm']} + + def gcn_reduce(self, node): + return {'x': node.mailbox['m'].sum(dim=1) * node.data['norm']} + + def forward(self, x): + self.g.ndata['x'] = torch.matmul(x, self.weight) + self.g.update_all(self.gcn_msg, self.gcn_reduce) + x = self.g.ndata.pop('x') + x = x + self.bias + return x + + +class GCN(torch.nn.Module): + def __init__(self, g, in_channels, out_channels): + super().__init__() + self.conv1 = GCNConv(g, in_channels, 16) + self.conv2 = GCNConv(g, 16, out_channels) + + def forward(self, x): + x = F.relu(self.conv1(x)) + x = F.dropout(x, training=self.training) + x = self.conv2(x) + return F.log_softmax(x, dim=1) + + +class GCNSPMVConv(torch.nn.Module): + def __init__(self, g, in_channels, out_channels): + super().__init__() + self.g = g + self.weight = Parameter(torch.empty(in_channels, out_channels)) + self.bias = Parameter(torch.empty(out_channels)) + self.reset_parameters() + + def reset_parameters(self): + glorot(self.weight) + zeros(self.bias) + + def forward(self, x): + x = torch.matmul(x, self.weight) + self.g.ndata['x'] = x * self.g.ndata['norm'] + self.g.update_all(fn.copy_src(src='x', out='m'), + fn.sum(msg='m', out='x')) + x = self.g.ndata.pop('x') * self.g.ndata['norm'] + x = x + self.bias + return x + + +class GCNSPMV(torch.nn.Module): + def __init__(self, g, in_channels, out_channels): + super().__init__() + self.conv1 = GCNSPMVConv(g, in_channels, 16) + self.conv2 = GCNSPMVConv(g, 16, out_channels) + + def forward(self, x): + x = F.relu(self.conv1(x)) + x = F.dropout(x, training=self.training) + x = self.conv2(x) + return F.log_softmax(x, dim=1) diff --git a/pytorch_geometric-2.3.1/benchmark/runtime/dgl/hidden.py b/pytorch_geometric-2.4.0/benchmark/runtime/dgl/hidden.py similarity index 91% rename from pytorch_geometric-2.3.1/benchmark/runtime/dgl/hidden.py rename to pytorch_geometric-2.4.0/benchmark/runtime/dgl/hidden.py index d38590e..69e19bd 100644 --- a/pytorch_geometric-2.3.1/benchmark/runtime/dgl/hidden.py +++ b/pytorch_geometric-2.4.0/benchmark/runtime/dgl/hidden.py @@ -5,7 +5,7 @@ warnings.filterwarnings('ignore') -class HiddenPrint(object): +class HiddenPrint: def __enter__(self): self._original_stdout = sys.stdout sys.stdout = open(os.devnull, 'w') diff --git a/pytorch_geometric-2.4.0/benchmark/runtime/dgl/main.py b/pytorch_geometric-2.4.0/benchmark/runtime/dgl/main.py new file mode 100644 index 0000000..9afc395 --- /dev/null +++ b/pytorch_geometric-2.4.0/benchmark/runtime/dgl/main.py @@ -0,0 +1,58 @@ +from itertools import product + +import dgl +import torch +from dgl import DGLGraph +from dgl.contrib.data import load_data +from dgl.data import citation_graph +from runtime.dgl.gat import GAT, GATSPMV +from runtime.dgl.gcn import GCN, GCNSPMV +from runtime.dgl.hidden import HiddenPrint +from 
runtime.dgl.rgcn import RGCN, RGCNSPMV +from runtime.dgl.train import train_runtime + +if torch.cuda.is_available(): + device = torch.device('cuda') +elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + device = torch.device('mps') +else: + device = torch.device('cpu') + +with HiddenPrint(): + Cora = citation_graph.load_cora() + CiteSeer = citation_graph.load_citeseer() + PubMed = citation_graph.load_pubmed() + MUTAG = load_data('mutag') # fair comparison + +# One training run before we start tracking duration to warm up GPU. +g = DGLGraph(Cora.graph) +g.set_n_initializer(dgl.init.zero_initializer) +g.add_edges(g.nodes(), g.nodes()) +norm = torch.pow(g.in_degrees().float(), -0.5) +norm[torch.isinf(norm)] = 0 +g.ndata['norm'] = norm.unsqueeze(1).to(device) +model = GCNSPMV(g, Cora.features.shape[1], Cora.num_labels).to(device) +train_runtime(model, Cora, epochs=200, device=device) + +for d, Net in product([Cora, CiteSeer, PubMed], [GCN, GCNSPMV, GAT, GATSPMV]): + g = DGLGraph(d.graph) + g.set_n_initializer(dgl.init.zero_initializer) + g.add_edges(g.nodes(), g.nodes()) + norm = torch.pow(g.in_degrees().float(), -0.5) + norm[torch.isinf(norm)] = 0 + g.ndata['norm'] = norm.unsqueeze(1).to(device) + model = Net(g, d.features.shape[1], d.num_labels).to(device) + t = train_runtime(model, d, epochs=200, device=device) + print(f'{d.name} - {Net.__name__}: {t:.2f}s') + +for d, Net in product([MUTAG], [RGCN, RGCNSPMV]): + g = DGLGraph() + g.add_nodes(d.num_nodes) + g.add_edges(d.edge_src, d.edge_dst) + edge_type = torch.from_numpy(d.edge_type).to(device) + edge_norm = torch.from_numpy(d.edge_norm).to(device) + g.edata.update({'type': edge_type, 'norm': edge_norm}) + g.ndata['id'] = torch.arange(d.num_nodes, dtype=torch.long, device=device) + model = Net(g, d.num_nodes, d.num_classes, d.num_rels) + t = train_runtime(model, d, epochs=200, device=device) + print(f'{d.name} - {Net.__name__}: {t:.2f}s') diff --git a/pytorch_geometric-2.4.0/benchmark/runtime/dgl/rgcn.py b/pytorch_geometric-2.4.0/benchmark/runtime/dgl/rgcn.py new file mode 100644 index 0000000..6e4ed53 --- /dev/null +++ b/pytorch_geometric-2.4.0/benchmark/runtime/dgl/rgcn.py @@ -0,0 +1,149 @@ +import dgl.function as fn +import torch +import torch.nn.functional as F +from torch.nn import Parameter as Param + +from torch_geometric.nn.inits import uniform + + +class RGCNConv(torch.nn.Module): + def __init__(self, g, in_channels, out_channels, num_relations, num_bases): + super().__init__() + + self.g = g + self.in_channels = in_channels + self.out_channels = out_channels + self.num_relations = num_relations + self.num_bases = num_bases + + self.basis = Param(torch.empty(num_bases, in_channels, out_channels)) + self.att = Param(torch.empty(num_relations, num_bases)) + self.root = Param(torch.empty(in_channels, out_channels)) + self.bias = Param(torch.empty(out_channels)) + + self.reset_parameters() + + def reset_parameters(self): + size = self.num_bases * self.in_channels + uniform(size, self.basis) + uniform(size, self.att) + uniform(size, self.root) + uniform(size, self.bias) + + def rgcn_reduce(self, node): + return {'x': node.mailbox['m'].sum(dim=1)} + + def forward(self, x): + self.w = torch.matmul(self.att, self.basis.view(self.num_bases, -1)) + self.w = self.w.view(self.num_relations, self.in_channels, + self.out_channels) + + if x is None: + + def msg_func(edge): + w = self.w.view(-1, self.out_channels) + index = edge.data['type'] * self.in_channels + edge.src['id'] + m = w.index_select(0, index) * 
edge.data['norm'].unsqueeze(1) + return {'m': m} + else: + self.g.ndata['x'] = x + + def msg_func(edge): + w = self.w.index_select(0, edge.data['type']) + m = torch.bmm(edge.src['x'].unsqueeze(1), w).squeeze() + m = m * edge.data['norm'].unsqueeze(1) + return {'m': m} + + self.g.update_all(msg_func, self.rgcn_reduce) + out = self.g.ndata.pop('x') + + if x is None: + out = out + self.root + else: + out = out + torch.matmul(x, self.root) + + out = out + self.bias + return out + + +class RGCN(torch.nn.Module): + def __init__(self, g, in_channels, out_channels, num_relations): + super().__init__() + self.conv1 = RGCNConv(g, in_channels, 16, num_relations, num_bases=30) + self.conv2 = RGCNConv(g, 16, out_channels, num_relations, num_bases=30) + + def forward(self, x): + x = F.relu(self.conv1(None)) + x = self.conv2(x) + return F.log_softmax(x, dim=1) + + +class RGCNSPMVConv(torch.nn.Module): + def __init__(self, g, in_channels, out_channels, num_relations, num_bases): + super().__init__() + + self.g = g + self.in_channels = in_channels + self.out_channels = out_channels + self.num_relations = num_relations + self.num_bases = num_bases + + self.basis = Param(torch.empty(num_bases, in_channels, out_channels)) + self.att = Param(torch.empty(num_relations, num_bases)) + self.root = Param(torch.empty(in_channels, out_channels)) + self.bias = Param(torch.empty(out_channels)) + + self.reset_parameters() + + def reset_parameters(self): + size = self.num_bases * self.in_channels + uniform(size, self.basis) + uniform(size, self.att) + uniform(size, self.root) + uniform(size, self.bias) + + def forward(self, x): + self.w = torch.matmul(self.att, self.basis.view(self.num_bases, -1)) + self.w = self.w.view(self.num_relations, self.in_channels, + self.out_channels) + + if x is None: + + def msg_func(edge): + w = self.w.view(-1, self.out_channels) + index = edge.data['type'] * self.in_channels + edge.src['id'] + m = w.index_select(0, index) * edge.data['norm'].unsqueeze(1) + return {'m': m} + else: + self.g.ndata['x'] = x + + def msg_func(edge): + w = self.w.index_select(0, edge.data['type']) + m = torch.bmm(edge.src['x'].unsqueeze(1), w).squeeze() + m = m * edge.data['norm'].unsqueeze(1) + return {'m': m} + + self.g.update_all(msg_func, fn.sum(msg='m', out='x')) + out = self.g.ndata.pop('x') + + if x is None: + out = out + self.root + else: + out = out + torch.matmul(x, self.root) + + out = out + self.bias + return out + + +class RGCNSPMV(torch.nn.Module): + def __init__(self, g, in_channels, out_channels, num_relations): + super().__init__() + self.conv1 = RGCNSPMVConv(g, in_channels, 16, num_relations, + num_bases=30) + self.conv2 = RGCNSPMVConv(g, 16, out_channels, num_relations, + num_bases=30) + + def forward(self, x): + x = F.relu(self.conv1(None)) + x = self.conv2(x) + return F.log_softmax(x, dim=1) diff --git a/pytorch_geometric-2.4.0/benchmark/runtime/dgl/train.py b/pytorch_geometric-2.4.0/benchmark/runtime/dgl/train.py new file mode 100644 index 0000000..1823e1a --- /dev/null +++ b/pytorch_geometric-2.4.0/benchmark/runtime/dgl/train.py @@ -0,0 +1,39 @@ +import time + +import torch +import torch.nn.functional as F + + +def train_runtime(model, data, epochs, device): + if hasattr(data, 'features'): + x = torch.tensor(data.features, dtype=torch.float, device=device) + else: + x = None + mask = data.train_mask if hasattr(data, 'train_mask') else data.train_idx + y = torch.tensor(data.labels, dtype=torch.long, device=device)[mask] + + model = model.to(device) + model.train() + optimizer = 
torch.optim.Adam(model.parameters(), lr=0.01) + + if torch.cuda.is_available(): + torch.cuda.synchronize() + elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + import torch.mps + torch.mps.synchronize() + t_start = time.perf_counter() + + for epoch in range(epochs): + optimizer.zero_grad() + out = model(x) + loss = F.nll_loss(out[mask], y.view(-1)) + loss.backward() + optimizer.step() + + if torch.cuda.is_available(): + torch.cuda.synchronize() + elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + torch.mps.synchronize() + t_end = time.perf_counter() + + return t_end - t_start diff --git a/pytorch_geometric-2.3.1/benchmark/runtime/gat.py b/pytorch_geometric-2.4.0/benchmark/runtime/gat.py similarity index 100% rename from pytorch_geometric-2.3.1/benchmark/runtime/gat.py rename to pytorch_geometric-2.4.0/benchmark/runtime/gat.py diff --git a/pytorch_geometric-2.3.1/benchmark/runtime/gcn.py b/pytorch_geometric-2.4.0/benchmark/runtime/gcn.py similarity index 100% rename from pytorch_geometric-2.3.1/benchmark/runtime/gcn.py rename to pytorch_geometric-2.4.0/benchmark/runtime/gcn.py diff --git a/pytorch_geometric-2.4.0/benchmark/runtime/main.py b/pytorch_geometric-2.4.0/benchmark/runtime/main.py new file mode 100644 index 0000000..80a5656 --- /dev/null +++ b/pytorch_geometric-2.4.0/benchmark/runtime/main.py @@ -0,0 +1,37 @@ +import os.path as osp +from itertools import product + +import torch +from runtime.gat import GAT +from runtime.gcn import GCN +from runtime.rgcn import RGCN +from runtime.train import train_runtime + +from torch_geometric.datasets import Entities, Planetoid + +if torch.cuda.is_available(): + device = torch.device('cuda') +elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + device = torch.device('mps') +else: + device = torch.device('cpu') + +root = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data') +Cora = Planetoid(osp.join(root, 'Cora'), 'Cora') +CiteSeer = Planetoid(osp.join(root, 'CiteSeer'), 'CiteSeer') +PubMed = Planetoid(osp.join(root, 'PubMed'), 'PubMed') +MUTAG = Entities(osp.join(root, 'EntitiesMUTAG'), 'MUTAG') + +# One training run before we start tracking duration to warm up GPU. 
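Two details of the runtime measurement above are worth calling out: a full throwaway training run warms up the GPU before anything is recorded, and the clock is only read after the device queue has been drained, since CUDA/ROCm and MPS execute asynchronously. A sketch of that synchronize-then-time pattern (`timed` is an illustrative helper, not part of the benchmark):

```
import time

import torch


def _sync():
    # Drain queued kernels so perf_counter() brackets real work.
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
        import torch.mps
        torch.mps.synchronize()


def timed(fn):
    _sync()
    t_start = time.perf_counter()
    result = fn()
    _sync()
    return result, time.perf_counter() - t_start
```

Without the synchronization, the measured span would cover only the kernel launches, not their execution.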
+model = GCN(Cora.num_features, Cora.num_classes) +train_runtime(model, Cora[0], epochs=200, device=device) + +for d, Net in product([Cora, CiteSeer, PubMed], [GCN, GAT]): + model = Net(d.num_features, d.num_classes) + t = train_runtime(model, d[0], epochs=200, device=device) + print(f'{str(d)[:-2]} - {Net.__name__}: {t:.2f}s') + +for d, Net in product([MUTAG], [RGCN]): + model = Net(d[0].num_nodes, d.num_classes, d.num_relations) + t = train_runtime(model, d[0], epochs=200, device=device) + print(f'{str(d)[:-2]} - {Net.__name__}: {t:.2f}s') diff --git a/pytorch_geometric-2.3.1/benchmark/runtime/rgcn.py b/pytorch_geometric-2.4.0/benchmark/runtime/rgcn.py similarity index 100% rename from pytorch_geometric-2.3.1/benchmark/runtime/rgcn.py rename to pytorch_geometric-2.4.0/benchmark/runtime/rgcn.py diff --git a/pytorch_geometric-2.4.0/benchmark/runtime/train.py b/pytorch_geometric-2.4.0/benchmark/runtime/train.py new file mode 100644 index 0000000..7dacfa5 --- /dev/null +++ b/pytorch_geometric-2.4.0/benchmark/runtime/train.py @@ -0,0 +1,35 @@ +import time + +import torch +import torch.nn.functional as F + + +def train_runtime(model, data, epochs, device): + optimizer = torch.optim.Adam(model.parameters(), lr=0.01) + model = model.to(device) + data = data.to(device) + model.train() + mask = data.train_mask if 'train_mask' in data else data.train_idx + y = data.y[mask] if 'train_mask' in data else data.train_y + + if torch.cuda.is_available(): + torch.cuda.synchronize() + elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + import torch.mps + torch.mps.synchronize() + t_start = time.perf_counter() + + for epoch in range(epochs): + optimizer.zero_grad() + out = model(data) + loss = F.nll_loss(out[mask], y) + loss.backward() + optimizer.step() + + if torch.cuda.is_available(): + torch.cuda.synchronize() + elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + torch.mps.synchronize() + t_end = time.perf_counter() + + return t_end - t_start diff --git a/pytorch_geometric-2.3.1/benchmark/setup.py b/pytorch_geometric-2.4.0/benchmark/setup.py similarity index 100% rename from pytorch_geometric-2.3.1/benchmark/setup.py rename to pytorch_geometric-2.4.0/benchmark/setup.py diff --git a/pytorch_geometric-2.3.1/benchmark/training/README.md b/pytorch_geometric-2.4.0/benchmark/training/README.md similarity index 100% rename from pytorch_geometric-2.3.1/benchmark/training/README.md rename to pytorch_geometric-2.4.0/benchmark/training/README.md diff --git a/pytorch_geometric-2.3.1/benchmark/training/training_benchmark.py b/pytorch_geometric-2.4.0/benchmark/training/training_benchmark.py similarity index 76% rename from pytorch_geometric-2.3.1/benchmark/training/training_benchmark.py rename to pytorch_geometric-2.4.0/benchmark/training/training_benchmark.py index 2fc9b11..6c65a94 100644 --- a/pytorch_geometric-2.3.1/benchmark/training/training_benchmark.py +++ b/pytorch_geometric-2.4.0/benchmark/training/training_benchmark.py @@ -14,11 +14,18 @@ get_model, get_split_masks, save_benchmark_data, + test, write_to_csv, ) +from torch_geometric import compile from torch_geometric.loader import NeighborLoader from torch_geometric.nn import PNAConv -from torch_geometric.profile import rename_profile_file, timeit, torch_profile +from torch_geometric.profile import ( + rename_profile_file, + timeit, + torch_profile, + xpu_profile, +) supported_sets = { 'ogbn-mag': ['rgat', 'rgcn'], @@ -26,6 +33,15 @@ 'Reddit': ['edge_cnn', 'gat', 'gcn', 'pna', 'sage'], } +device_conditions = 
{ + 'cpu': (lambda: True), + 'cuda': (lambda: torch.cuda.is_available()), + 'mps': + (lambda: + (hasattr(torch.backends, 'mps') and torch.backends.mps.is_available())), + 'xpu': (lambda: torch.xpu.is_available()), +} + def train_homo(model, loader, optimizer, device, progress_bar=True, desc="", trim=False): @@ -34,7 +50,7 @@ def train_homo(model, loader, optimizer, device, progress_bar=True, desc="", for batch in loader: optimizer.zero_grad() batch = batch.to(device) - if hasattr(batch, 'adj_t'): + if 'adj_t' in batch: edge_index = batch.adj_t else: edge_index = batch.edge_index @@ -65,7 +81,7 @@ def train_hetero(model, loader, optimizer, device, progress_bar=True, desc="", for batch in loader: optimizer.zero_grad() batch = batch.to(device) - if len(batch.adj_t_dict) > 0: + if 'adj_t' in batch: edge_index_dict = batch.adj_t_dict else: edge_index_dict = batch.edge_index_dict @@ -78,51 +94,29 @@ def train_hetero(model, loader, optimizer, device, progress_bar=True, desc="", optimizer.step() -@torch.no_grad() -def test(model, loader, device, hetero, progress_bar=True, desc="") -> None: - if progress_bar: - loader = tqdm(loader, desc=desc) - total_examples = total_correct = 0 - if hetero: - for batch in loader: - batch = batch.to(device) - if len(batch.adj_t_dict) > 0: - edge_index_dict = batch.adj_t_dict - else: - edge_index_dict = batch.edge_index_dict - out = model(batch.x_dict, edge_index_dict) - batch_size = batch['paper'].batch_size - out = out['paper'][:batch_size] - pred = out.argmax(dim=-1) - - total_examples += batch_size - total_correct += int((pred == batch['paper'].y[:batch_size]).sum()) - else: - for batch in loader: - batch = batch.to(device) - if hasattr(batch, 'adj_t'): - edge_index = batch.adj_t - else: - edge_index = batch.edge_index - out = model(batch.x, edge_index) - batch_size = batch.batch_size - out = out[:batch_size] - pred = out.argmax(dim=-1) +def run(args: argparse.ArgumentParser): + csv_data = defaultdict(list) - total_examples += batch_size - total_correct += int((pred == batch.y[:batch_size]).sum()) - return total_correct / total_examples + if args.write_csv == 'prof' and not args.profile: + warnings.warn("Cannot write profile data to CSV because profiling is " + "disabled") + if args.device == 'xpu': + try: + import intel_extension_for_pytorch as ipex + except ImportError: + raise RuntimeError('XPU device requires IPEX to be installed') -def run(args: argparse.ArgumentParser): - csv_data = defaultdict(list) + if not device_conditions[args.device](): + raise RuntimeError(f'{args.device.upper()} is not available') + device = torch.device(args.device) - device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # If we use a custom number of steps, then we need to use RandomSampler, # which already does shuffle. shuffle = False if args.num_steps != -1 else True print('BENCHMARK STARTS') + print(f'Running on {args.device.upper()}') for dataset_name in args.datasets: assert dataset_name in supported_sets.keys( ), f"Dataset {dataset_name} isn't supported." 
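The `device_conditions` table added above turns device validation into a dictionary of cheap availability probes, which keeps `run()` free of per-backend `if` chains. A standalone sketch of the same idea (the `hasattr(torch, 'xpu')` guard is a defensive addition for stock PyTorch builds where IPEX is not loaded):

```
import torch

device_conditions = {
    'cpu': lambda: True,
    'cuda': lambda: torch.cuda.is_available(),
    'mps': lambda: (hasattr(torch.backends, 'mps')
                    and torch.backends.mps.is_available()),
    'xpu': lambda: hasattr(torch, 'xpu') and torch.xpu.is_available(),
}


def get_device(name: str) -> torch.device:
    # Fail fast with a uniform message instead of backend-specific errors.
    if not device_conditions[name]():
        raise RuntimeError(f'{name.upper()} is not available')
    return torch.device(name)
```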
@@ -134,10 +128,19 @@ def run(args: argparse.ArgumentParser): hetero = True if dataset_name == 'ogbn-mag' else False mask, val_mask, test_mask = get_split_masks(data, dataset_name) degree = None - if torch.cuda.is_available(): + + if args.device == 'cpu': + amp = torch.cpu.amp.autocast(enabled=args.bf16) + elif args.device == 'cuda': amp = torch.cuda.amp.autocast(enabled=False) + elif args.device == 'xpu': + amp = torch.xpu.amp.autocast(enabled=False) else: - amp = torch.cpu.amp.autocast(enabled=args.bf16) + amp = nullcontext() + + if args.device == 'xpu' and args.warmup < 1: + print('XPU device requires warmup - setting warmup=1') + args.warmup = 1 inputs_channels = data[ 'paper'].num_features if dataset_name == 'ogbn-mag' \ @@ -179,7 +182,6 @@ def run(args: argparse.ArgumentParser): data, input_nodes=mask, sampler=sampler, - filter_per_worker=args.filter_per_worker, **kwargs, ) if args.evaluate: @@ -187,14 +189,12 @@ def run(args: argparse.ArgumentParser): data, input_nodes=val_mask, sampler=None, - filter_per_worker=args.filter_per_worker, **kwargs, ) test_loader = NeighborLoader( data, input_nodes=test_mask, sampler=None, - filter_per_worker=args.filter_per_worker, **kwargs, ) for hidden_channels in args.num_hidden_channels: @@ -225,9 +225,17 @@ def run(args: argparse.ArgumentParser): metadata=data.metadata() if hetero else None) model = model.to(device) model.train() + + if args.compile: + model = compile(model, dynamic=True) + optimizer = torch.optim.Adam(model.parameters(), lr=0.001) + if args.device == 'xpu': + model, optimizer = ipex.optimize( + model, optimizer=optimizer) + progress_bar = False if args.no_progress_bar else True train = train_hetero if hetero else train_homo @@ -277,15 +285,23 @@ def run(args: argparse.ArgumentParser): print(f'Test Accuracy: {test_acc:.4f}') if args.profile: - with torch_profile(): + if args.device == 'xpu': + profile = xpu_profile( + args.export_chrome_trace) + else: + profile = torch_profile( + args.export_chrome_trace, csv_data, + args.write_csv) + with profile: train(model, subgraph_loader, optimizer, device, progress_bar=progress_bar, desc="Profile training") - rename_profile_file(model_name, dataset_name, - str(batch_size), - str(layers), - str(hidden_channels), - str(num_neighbors)) + if args.export_chrome_trace: + rename_profile_file( + model_name, dataset_name, + str(batch_size), str(layers), + str(hidden_channels), + str(num_neighbors)) total_time = t.duration if args.num_steps != -1: @@ -297,19 +313,34 @@ def run(args: argparse.ArgumentParser): print(f'Throughput: {throughput:.3f} samples/s') print(f'Latency: {latency:.3f} ms') - save_benchmark_data(csv_data, batch_size, layers, - num_neighbors, hidden_channels, - total_time, model_name, - dataset_name, - args.use_sparse_tensor) + num_records = 1 + if args.write_csv == 'prof': + # For profiling with PyTorch, we save the top-5 + # most time consuming operations. Therefore, the + # same data should be entered for each of them. 
+ num_records = 5 + for _ in range(num_records): + save_benchmark_data( + csv_data, + batch_size, + layers, + num_neighbors, + hidden_channels, + total_time, + model_name, + dataset_name, + args.use_sparse_tensor, + ) if args.write_csv: - write_to_csv(csv_data, training=True) + write_to_csv(csv_data, args.write_csv, training=True) if __name__ == '__main__': argparser = argparse.ArgumentParser('GNN training benchmark') add = argparser.add_argument + add('--device', choices=['cpu', 'cuda', 'mps', 'xpu'], default='cpu', + help='Device to run benchmark on') add('--datasets', nargs='+', default=['ogbn-mag', 'ogbn-products', 'Reddit'], type=str) add('--use-sparse-tensor', action='store_true', @@ -340,12 +371,14 @@ def run(args: argparse.ArgumentParser): help="Use DataLoader affinitzation.") add('--loader-cores', nargs='+', default=[], type=int, help="List of CPU core IDs to use for DataLoader workers.") - add('--filter-per-worker', action='store_true', - help='Enable filter-per-worker feature of the dataloader.') add('--measure-load-time', action='store_true') add('--evaluate', action='store_true') - add('--write-csv', action='store_true', help='Write benchmark data to csv') + add('--write-csv', choices=[None, 'bench', 'prof'], default=None, + help='Write benchmark or PyTorch profile data to CSV') + add('--export-chrome-trace', default=True, type=bool, + help='Export chrome trace file. Works only with PyTorch profiler') add('--trim', action='store_true', help="Use `trim_to_layer` optimization") + add('--compile', action='store_true') args = argparser.parse_args() run(args) diff --git a/pytorch_geometric-2.4.0/benchmark/utils/__init__.py b/pytorch_geometric-2.4.0/benchmark/utils/__init__.py new file mode 100644 index 0000000..d97451a --- /dev/null +++ b/pytorch_geometric-2.4.0/benchmark/utils/__init__.py @@ -0,0 +1,17 @@ +from .utils import emit_itt +from .utils import get_dataset, get_dataset_with_transformation +from .utils import get_model +from .utils import get_split_masks +from .utils import save_benchmark_data, write_to_csv +from .utils import test + +__all__ = [ + 'emit_itt', + 'get_dataset', + 'get_dataset_with_transformation', + 'get_model', + 'get_split_masks', + 'save_benchmark_data', + 'write_to_csv', + 'test', +] diff --git a/pytorch_geometric-2.3.1/benchmark/utils/hetero_gat.py b/pytorch_geometric-2.4.0/benchmark/utils/hetero_gat.py similarity index 91% rename from pytorch_geometric-2.3.1/benchmark/utils/hetero_gat.py rename to pytorch_geometric-2.4.0/benchmark/utils/hetero_gat.py index b1bda5c..c67fe1c 100644 --- a/pytorch_geometric-2.3.1/benchmark/utils/hetero_gat.py +++ b/pytorch_geometric-2.4.0/benchmark/utils/hetero_gat.py @@ -16,13 +16,13 @@ def forward(self, x_dict, edge_index_dict): return self.model(x_dict, edge_index_dict) @torch.no_grad() - def inference(self, loader, device, progress_bar=False): + def inference(self, loader, device, progress_bar=False, **kwargs): self.model.eval() if progress_bar: loader = tqdm(loader, desc="Inference") for batch in loader: batch = batch.to(device) - if len(batch.adj_t_dict) > 0: + if 'adj_t' in batch: self.model(batch.x_dict, batch.adj_t_dict) else: self.model(batch.x_dict, batch.edge_index_dict) @@ -35,7 +35,7 @@ def test(self, x, loader, device, progress_bar=False): loader = tqdm(loader, desc="Evaluate") for batch in loader: batch = batch.to(device) - if len(batch.adj_t_dict) > 0: + if 'adj_t' in batch: out = self.model(batch.x_dict, batch.adj_t_dict) else: out = self.model(batch.x_dict, batch.edge_index_dict) diff --git 
a/pytorch_geometric-2.3.1/benchmark/utils/hetero_sage.py b/pytorch_geometric-2.4.0/benchmark/utils/hetero_sage.py similarity index 91% rename from pytorch_geometric-2.3.1/benchmark/utils/hetero_sage.py rename to pytorch_geometric-2.4.0/benchmark/utils/hetero_sage.py index 8b71b15..2f616b6 100644 --- a/pytorch_geometric-2.3.1/benchmark/utils/hetero_sage.py +++ b/pytorch_geometric-2.4.0/benchmark/utils/hetero_sage.py @@ -15,13 +15,13 @@ def forward(self, x_dict, edge_index_dict): return self.model(x_dict, edge_index_dict) @torch.no_grad() - def inference(self, loader, device, progress_bar=False): + def inference(self, loader, device, progress_bar=False, **kwargs): self.model.eval() if progress_bar: loader = tqdm(loader, desc="Inference") for batch in loader: batch = batch.to(device) - if len(batch.adj_t_dict) > 0: + if 'adj_t' in batch: self.model(batch.x_dict, batch.adj_t_dict) else: self.model(batch.x_dict, batch.edge_index_dict) @@ -34,7 +34,7 @@ def test(self, loader, device, progress_bar=False): loader = tqdm(loader, desc="Evaluate") for batch in loader: batch = batch.to(device) - if len(batch.adj_t_dict) > 0: + if 'adj_t' in batch: out = self.model(batch.x_dict, batch.adj_t_dict) else: out = self.model(batch.x_dict, batch.edge_index_dict) diff --git a/pytorch_geometric-2.4.0/benchmark/utils/utils.py b/pytorch_geometric-2.4.0/benchmark/utils/utils.py new file mode 100644 index 0000000..5b2dd3d --- /dev/null +++ b/pytorch_geometric-2.4.0/benchmark/utils/utils.py @@ -0,0 +1,196 @@ +import os +import os.path as osp +from datetime import datetime + +import torch +from ogb.nodeproppred import PygNodePropPredDataset +from tqdm import tqdm + +import torch_geometric.transforms as T +from torch_geometric.data import HeteroData +from torch_geometric.datasets import OGB_MAG, Reddit +from torch_geometric.nn import GAT, GCN, PNA, EdgeCNN, GraphSAGE +from torch_geometric.utils import index_to_mask + +from .hetero_gat import HeteroGAT +from .hetero_sage import HeteroGraphSAGE + +try: + from torch.autograd.profiler import emit_itt +except ImportError: + from contextlib import contextmanager + + @contextmanager + def emit_itt(*args, **kwargs): + yield + + +models_dict = { + 'edge_cnn': EdgeCNN, + 'gat': GAT, + 'gcn': GCN, + 'pna': PNA, + 'sage': GraphSAGE, + 'rgat': HeteroGAT, + 'rgcn': HeteroGraphSAGE, +} + + +def get_dataset_with_transformation(name, root, use_sparse_tensor=False, + bf16=False): + path = osp.join(osp.dirname(osp.realpath(__file__)), root, name) + transform = T.ToSparseTensor( + remove_edge_index=False) if use_sparse_tensor else None + if name == 'ogbn-mag': + if transform is None: + transform = T.ToUndirected(merge=True) + else: + transform = T.Compose([T.ToUndirected(merge=True), transform]) + dataset = OGB_MAG(root=path, preprocess='metapath2vec', + transform=transform) + elif name == 'ogbn-products': + if transform is None: + transform = T.RemoveDuplicatedEdges() + else: + transform = T.Compose([T.RemoveDuplicatedEdges(), transform]) + + dataset = PygNodePropPredDataset('ogbn-products', root=path, + transform=transform) + + elif name == 'Reddit': + dataset = Reddit(root=path, transform=transform) + + data = dataset[0] + + if name == 'ogbn-products': + split_idx = dataset.get_idx_split() + data.train_mask = index_to_mask(split_idx['train'], + size=data.num_nodes) + data.val_mask = index_to_mask(split_idx['valid'], size=data.num_nodes) + data.test_mask = index_to_mask(split_idx['test'], size=data.num_nodes) + data.y = data.y.squeeze() + + if bf16: + if isinstance(data, HeteroData): + 
for node_type in data.node_types: + data[node_type].x = data[node_type].x.to(torch.bfloat16) + else: + data.x = data.x.to(torch.bfloat16) + + return data, dataset.num_classes, transform + + +def get_dataset(name, root, use_sparse_tensor=False, bf16=False): + data, num_classes, _ = get_dataset_with_transformation( + name, root, use_sparse_tensor, bf16) + return data, num_classes + + +def get_model(name, params, metadata=None): + Model = models_dict.get(name, None) + assert Model is not None, f'Model {name} not supported!' + + if name == 'rgat': + return Model(metadata, params['hidden_channels'], params['num_layers'], + params['output_channels'], params['num_heads']) + + if name == 'rgcn': + return Model(metadata, params['hidden_channels'], params['num_layers'], + params['output_channels']) + + if name == 'gat': + return Model(params['inputs_channels'], params['hidden_channels'], + params['num_layers'], params['output_channels'], + heads=params['num_heads']) + + if name == 'pna': + return Model(params['inputs_channels'], params['hidden_channels'], + params['num_layers'], params['output_channels'], + aggregators=['mean', 'min', 'max', 'std'], + scalers=['identity', 'amplification', + 'attenuation'], deg=params['degree']) + + return Model(params['inputs_channels'], params['hidden_channels'], + params['num_layers'], params['output_channels']) + + +def get_split_masks(data, dataset_name): + if dataset_name == 'ogbn-mag': + train_mask = ('paper', data['paper'].train_mask) + test_mask = ('paper', data['paper'].test_mask) + val_mask = ('paper', data['paper'].val_mask) + else: + train_mask = data.train_mask + val_mask = data.val_mask + test_mask = data.test_mask + return train_mask, val_mask, test_mask + + +def save_benchmark_data(csv_data, batch_size, layers, num_neighbors, + hidden_channels, total_time, model_name, dataset_name, + use_sparse_tensor): + config = f'Batch size={batch_size}, ' \ + f'#Layers={layers}, ' \ + f'#Neighbors={num_neighbors}, ' \ + f'#Hidden features={hidden_channels}' + csv_data['DATE'].append(datetime.now().date()) + csv_data['TIME (s)'].append(round(total_time, 2)) + csv_data['MODEL'].append(model_name) + csv_data['DATASET'].append(dataset_name) + csv_data['CONFIG'].append(config) + csv_data['SPARSE'].append(use_sparse_tensor) + + +def write_to_csv(csv_data, write_csv='bench', training=False): + import pandas as pd + results_path = osp.join(osp.dirname(osp.realpath(__file__)), '../results/') + os.makedirs(results_path, exist_ok=True) + + name = 'training' if training else 'inference' + if write_csv == 'bench': + csv_file_name = f'TOTAL_{name}_benchmark.csv' + else: + csv_file_name = f'TOTAL_prof_{name}_benchmark.csv' + csv_path = osp.join(results_path, csv_file_name) + index_label = 'TEST_ID' if write_csv == 'bench' else 'ID' + + with_header = not osp.exists(csv_path) + df = pd.DataFrame(csv_data) + df.to_csv(csv_path, mode='a', index_label=index_label, header=with_header) + + +@torch.no_grad() +def test(model, loader, device, hetero, progress_bar=True, + desc="Evaluation") -> None: + if progress_bar: + loader = tqdm(loader, desc=desc) + total_examples = total_correct = 0 + if hetero: + for batch in loader: + batch = batch.to(device) + if 'adj_t' in batch: + edge_index_dict = batch.adj_t_dict + else: + edge_index_dict = batch.edge_index_dict + out = model(batch.x_dict, edge_index_dict) + batch_size = batch['paper'].batch_size + out = out['paper'][:batch_size] + pred = out.argmax(dim=-1) + + total_examples += batch_size + total_correct += int((pred == 
batch['paper'].y[:batch_size]).sum()) + else: + for batch in loader: + batch = batch.to(device) + if 'adj_t' in batch: + edge_index = batch.adj_t + else: + edge_index = batch.edge_index + out = model(batch.x, edge_index) + batch_size = batch.batch_size + out = out[:batch_size] + pred = out.argmax(dim=-1) + + total_examples += batch_size + total_correct += int((pred == batch.y[:batch_size]).sum()) + return total_correct / total_examples diff --git a/pytorch_geometric-2.3.1/codecov.yml b/pytorch_geometric-2.4.0/codecov.yml similarity index 80% rename from pytorch_geometric-2.3.1/codecov.yml rename to pytorch_geometric-2.4.0/codecov.yml index 019fd7b..c2a07ad 100644 --- a/pytorch_geometric-2.3.1/codecov.yml +++ b/pytorch_geometric-2.4.0/codecov.yml @@ -1,4 +1,4 @@ -# see https://docs.codecov.io/docs/codecov-yaml +# See: https://docs.codecov.io/docs/codecov-yaml coverage: range: 80..100 round: down diff --git a/pytorch_geometric-2.4.0/conda/pyg/README.md b/pytorch_geometric-2.4.0/conda/pyg/README.md new file mode 100644 index 0000000..5771480 --- /dev/null +++ b/pytorch_geometric-2.4.0/conda/pyg/README.md @@ -0,0 +1,3 @@ +``` +./build_conda.sh 3.11 2.1.0 cu118 # python, pytorch and cuda version +``` diff --git a/pytorch_geometric-2.4.0/conda/pyg/build_conda.sh b/pytorch_geometric-2.4.0/conda/pyg/build_conda.sh new file mode 100644 index 0000000..084d3bf --- /dev/null +++ b/pytorch_geometric-2.4.0/conda/pyg/build_conda.sh @@ -0,0 +1,59 @@ +#!/bin/bash + +export PYTHON_VERSION=$1 +export TORCH_VERSION=$2 +export CUDA_VERSION=$3 + +export CONDA_PYTORCH_CONSTRAINT="pytorch==${TORCH_VERSION%.*}.*" + +if [ "${CUDA_VERSION}" = "cpu" ]; then + export CONDA_CUDATOOLKIT_CONSTRAINT="cpuonly # [not osx]" +else + case $CUDA_VERSION in + cu121) + export CONDA_CUDATOOLKIT_CONSTRAINT="pytorch-cuda==12.1.*" + ;; + cu118) + export CONDA_CUDATOOLKIT_CONSTRAINT="pytorch-cuda==11.8.*" + ;; + cu117) + export CONDA_CUDATOOLKIT_CONSTRAINT="pytorch-cuda==11.7.*" + ;; + cu116) + if [ "${TORCH_VERSION}" = "1.12.0" ]; then + export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit==11.6.*" + else + export CONDA_CUDATOOLKIT_CONSTRAINT="pytorch-cuda==11.6.*" + fi + ;; + cu115) + export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit==11.5.*" + ;; + cu113) + export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit==11.3.*" + ;; + cu111) + export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit==11.1.*" + ;; + cu102) + export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit==10.2.*" + ;; + cu101) + export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit==10.1.*" + ;; + *) + echo "Unrecognized CUDA_VERSION=$CUDA_VERSION" + exit 1 + ;; + esac +fi + +echo "PyTorch $TORCH_VERSION+$CUDA_VERSION" +echo "- $CONDA_PYTORCH_CONSTRAINT" +echo "- $CONDA_CUDATOOLKIT_CONSTRAINT" + +if [ "${TORCH_VERSION}" = "1.12.0" ] && [ "${CUDA_VERSION}" = "cu116" ]; then + conda build . -c pytorch -c pyg -c default -c nvidia -c conda-forge --output-folder "$HOME/conda-bld" +else + conda build . 
-c pytorch -c pyg -c default -c nvidia --output-folder "$HOME/conda-bld" +fi diff --git a/pytorch_geometric-2.4.0/conda/pyg/meta.yaml b/pytorch_geometric-2.4.0/conda/pyg/meta.yaml new file mode 100644 index 0000000..959e7e8 --- /dev/null +++ b/pytorch_geometric-2.4.0/conda/pyg/meta.yaml @@ -0,0 +1,42 @@ +package: + name: pyg + version: 2.3.1 + +source: + url: https://files.pythonhosted.org/packages/06/a5/9f5af849c4185da5ea55f70ef17e23f93355cd4e989d82cfc8ba2d8747af/torch_geometric-2.3.1.tar.gz + +requirements: + host: + - pip + - python {{ environ.get('PYTHON_VERSION') }} + + run: + - python {{ environ.get('PYTHON_VERSION') }} + - {{ environ.get('CONDA_PYTORCH_CONSTRAINT') }} + - {{ environ.get('CONDA_CUDATOOLKIT_CONSTRAINT') }} + - psutil + - tqdm + - jinja2 + - pyparsing + - numpy + - scipy + - requests + - scikit-learn + +build: + string: py{{ environ.get('PYTHON_VERSION').replace('.', '') }}_torch_{{ environ['TORCH_VERSION'] }}_{{ environ['CUDA_VERSION'] }} + script: pip install . + +test: + imports: + - torch_geometric + - torch_geometric.nn + - torch_geometric.data + - torch_geometric.utils + - torch_geometric.datasets + - torch_geometric.transforms + +about: + home: https://github.com/pyg-team/pytorch_geometric + license: MIT + summary: Graph Neural Network Library for PyTorch diff --git a/pytorch_geometric-2.4.0/conda/pytorch-geometric/README.md b/pytorch_geometric-2.4.0/conda/pytorch-geometric/README.md new file mode 100644 index 0000000..5771480 --- /dev/null +++ b/pytorch_geometric-2.4.0/conda/pytorch-geometric/README.md @@ -0,0 +1,3 @@ +``` +./build_conda.sh 3.11 2.1.0 cu118 # python, pytorch and cuda version +``` diff --git a/pytorch_geometric-2.4.0/conda/pytorch-geometric/build_conda.sh b/pytorch_geometric-2.4.0/conda/pytorch-geometric/build_conda.sh new file mode 100644 index 0000000..d78f2d9 --- /dev/null +++ b/pytorch_geometric-2.4.0/conda/pytorch-geometric/build_conda.sh @@ -0,0 +1,59 @@ +#!/bin/bash + +export PYTHON_VERSION=$1 +export TORCH_VERSION=$2 +export CUDA_VERSION=$3 + +export CONDA_PYTORCH_CONSTRAINT="pytorch==${TORCH_VERSION%.*}.*" + +if [ "${CUDA_VERSION}" = "cpu" ]; then + export CONDA_CUDATOOLKIT_CONSTRAINT="cpuonly # [not osx]" +else + case $CUDA_VERSION in + cu121) + export CONDA_CUDATOOLKIT_CONSTRAINT="pytorch-cuda==12.1.*" + ;; + cu118) + export CONDA_CUDATOOLKIT_CONSTRAINT="pytorch-cuda==11.8.*" + ;; + cu117) + export CONDA_CUDATOOLKIT_CONSTRAINT="pytorch-cuda==11.7.*" + ;; + cu116) + if [ "${TORCH_VERSION}" = "1.12.0" ]; then + export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit==11.6.*" + else + export CONDA_CUDATOOLKIT_CONSTRAINT="pytorch-cuda==11.6.*" + fi + ;; + cu115) + export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit==11.5.*" + ;; + cu113) + export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit==11.3.*" + ;; + cu111) + export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit==11.1.*" + ;; + cu102) + export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit==10.2.*" + ;; + cu101) + export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit==10.1.*" + ;; + *) + echo "Unrecognized CUDA_VERSION=$CUDA_VERSION" + exit 1 + ;; + esac +fi + +echo "PyTorch $TORCH_VERSION+$CUDA_VERSION" +echo "- $CONDA_PYTORCH_CONSTRAINT" +echo "- $CONDA_CUDATOOLKIT_CONSTRAINT" + +if [ "${TORCH_VERSION}" = "1.12.0" ] && [ "${CUDA_VERSION}" = "cu116" ]; then + conda build . -c pytorch -c rusty1s -c default -c nvidia -c conda-forge --output-folder "$HOME/conda-bld" +else + conda build . 
-c pytorch -c rusty1s -c default -c nvidia --output-folder "$HOME/conda-bld" +fi diff --git a/pytorch_geometric-2.4.0/conda/pytorch-geometric/meta.yaml b/pytorch_geometric-2.4.0/conda/pytorch-geometric/meta.yaml new file mode 100644 index 0000000..21b9862 --- /dev/null +++ b/pytorch_geometric-2.4.0/conda/pytorch-geometric/meta.yaml @@ -0,0 +1,42 @@ +package: + name: pytorch-geometric + version: 2.3.1 + +source: + url: https://files.pythonhosted.org/packages/06/a5/9f5af849c4185da5ea55f70ef17e23f93355cd4e989d82cfc8ba2d8747af/torch_geometric-2.3.1.tar.gz + +requirements: + host: + - pip + - python {{ environ.get('PYTHON_VERSION') }} + + run: + - python {{ environ.get('PYTHON_VERSION') }} + - {{ environ.get('CONDA_PYTORCH_CONSTRAINT') }} + - {{ environ.get('CONDA_CUDATOOLKIT_CONSTRAINT') }} + - psutil + - tqdm + - jinja2 + - pyparsing + - numpy + - scipy + - requests + - scikit-learn + +build: + string: py{{ environ.get('PYTHON_VERSION').replace('.', '') }}_torch_{{ environ['TORCH_VERSION'] }}_{{ environ['CUDA_VERSION'] }} + script: pip install . + +test: + imports: + - torch_geometric + - torch_geometric.nn + - torch_geometric.data + - torch_geometric.utils + - torch_geometric.datasets + - torch_geometric.transforms + +about: + home: https://github.com/pyg-team/pytorch_geometric + license: MIT + summary: Graph Neural Network Library for PyTorch diff --git a/pytorch_geometric-2.3.1/docker/Dockerfile b/pytorch_geometric-2.4.0/docker/Dockerfile similarity index 99% rename from pytorch_geometric-2.3.1/docker/Dockerfile rename to pytorch_geometric-2.4.0/docker/Dockerfile index 2627294..d4f37f0 100644 --- a/pytorch_geometric-2.3.1/docker/Dockerfile +++ b/pytorch_geometric-2.4.0/docker/Dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:18.04 # metainformation -LABEL org.opencontainers.image.version = "2.1.0" +LABEL org.opencontainers.image.version = "2.3.1" LABEL org.opencontainers.image.authors = "Matthias Fey" LABEL org.opencontainers.image.source = "https://github.com/pyg-team/pytorch_geometric" LABEL org.opencontainers.image.licenses = "MIT" diff --git a/pytorch_geometric-2.3.1/docker/README.md b/pytorch_geometric-2.4.0/docker/README.md similarity index 100% rename from pytorch_geometric-2.3.1/docker/README.md rename to pytorch_geometric-2.4.0/docker/README.md diff --git a/pytorch_geometric-2.3.1/docker/singularity b/pytorch_geometric-2.4.0/docker/singularity similarity index 100% rename from pytorch_geometric-2.3.1/docker/singularity rename to pytorch_geometric-2.4.0/docker/singularity diff --git a/pytorch_geometric-2.3.1/docs/Makefile b/pytorch_geometric-2.4.0/docs/Makefile similarity index 100% rename from pytorch_geometric-2.3.1/docs/Makefile rename to pytorch_geometric-2.4.0/docs/Makefile diff --git a/pytorch_geometric-2.3.1/docs/README.md b/pytorch_geometric-2.4.0/docs/README.md similarity index 100% rename from pytorch_geometric-2.3.1/docs/README.md rename to pytorch_geometric-2.4.0/docs/README.md diff --git a/pytorch_geometric-2.4.0/docs/requirements.txt b/pytorch_geometric-2.4.0/docs/requirements.txt new file mode 100644 index 0000000..d11fab3 --- /dev/null +++ b/pytorch_geometric-2.4.0/docs/requirements.txt @@ -0,0 +1,4 @@ +https://download.pytorch.org/whl/cpu/torch-1.13.0%2Bcpu-cp38-cp38-linux_x86_64.whl +numpy>=1.19.5 +nbsphinx +git+https://github.com/pyg-team/pyg_sphinx_theme.git diff --git a/pytorch_geometric-2.3.1/docs/source/.gitignore b/pytorch_geometric-2.4.0/docs/source/.gitignore similarity index 100% rename from pytorch_geometric-2.3.1/docs/source/.gitignore rename to 
pytorch_geometric-2.4.0/docs/source/.gitignore diff --git a/pytorch_geometric-2.3.1/docs/source/_figures/.gitignore b/pytorch_geometric-2.4.0/docs/source/_figures/.gitignore similarity index 100% rename from pytorch_geometric-2.3.1/docs/source/_figures/.gitignore rename to pytorch_geometric-2.4.0/docs/source/_figures/.gitignore diff --git a/pytorch_geometric-2.3.1/docs/source/_figures/architecture.svg b/pytorch_geometric-2.4.0/docs/source/_figures/architecture.svg similarity index 100% rename from pytorch_geometric-2.3.1/docs/source/_figures/architecture.svg rename to pytorch_geometric-2.4.0/docs/source/_figures/architecture.svg diff --git a/pytorch_geometric-2.3.1/docs/source/_figures/build.sh b/pytorch_geometric-2.4.0/docs/source/_figures/build.sh similarity index 100% rename from pytorch_geometric-2.3.1/docs/source/_figures/build.sh rename to pytorch_geometric-2.4.0/docs/source/_figures/build.sh diff --git a/pytorch_geometric-2.3.1/docs/source/_figures/graph.svg b/pytorch_geometric-2.4.0/docs/source/_figures/graph.svg similarity index 100% rename from pytorch_geometric-2.3.1/docs/source/_figures/graph.svg rename to pytorch_geometric-2.4.0/docs/source/_figures/graph.svg diff --git a/pytorch_geometric-2.3.1/docs/source/_figures/graph.tex b/pytorch_geometric-2.4.0/docs/source/_figures/graph.tex similarity index 100% rename from pytorch_geometric-2.3.1/docs/source/_figures/graph.tex rename to pytorch_geometric-2.4.0/docs/source/_figures/graph.tex diff --git a/pytorch_geometric-2.3.1/docs/source/_figures/graphgym_design_space.png b/pytorch_geometric-2.4.0/docs/source/_figures/graphgym_design_space.png similarity index 100% rename from pytorch_geometric-2.3.1/docs/source/_figures/graphgym_design_space.png rename to pytorch_geometric-2.4.0/docs/source/_figures/graphgym_design_space.png diff --git a/pytorch_geometric-2.3.1/docs/source/_figures/graphgym_evaluation.png b/pytorch_geometric-2.4.0/docs/source/_figures/graphgym_evaluation.png similarity index 100% rename from pytorch_geometric-2.3.1/docs/source/_figures/graphgym_evaluation.png rename to pytorch_geometric-2.4.0/docs/source/_figures/graphgym_evaluation.png diff --git a/pytorch_geometric-2.3.1/docs/source/_figures/graphgym_results.png b/pytorch_geometric-2.4.0/docs/source/_figures/graphgym_results.png similarity index 100% rename from pytorch_geometric-2.3.1/docs/source/_figures/graphgym_results.png rename to pytorch_geometric-2.4.0/docs/source/_figures/graphgym_results.png diff --git a/pytorch_geometric-2.3.1/docs/source/_figures/hg_example.svg b/pytorch_geometric-2.4.0/docs/source/_figures/hg_example.svg similarity index 100% rename from pytorch_geometric-2.3.1/docs/source/_figures/hg_example.svg rename to pytorch_geometric-2.4.0/docs/source/_figures/hg_example.svg diff --git a/pytorch_geometric-2.3.1/docs/source/_figures/hg_example.tex b/pytorch_geometric-2.4.0/docs/source/_figures/hg_example.tex similarity index 100% rename from pytorch_geometric-2.3.1/docs/source/_figures/hg_example.tex rename to pytorch_geometric-2.4.0/docs/source/_figures/hg_example.tex diff --git a/pytorch_geometric-2.3.1/docs/source/_figures/remote_1.png b/pytorch_geometric-2.4.0/docs/source/_figures/remote_1.png similarity index 100% rename from pytorch_geometric-2.3.1/docs/source/_figures/remote_1.png rename to pytorch_geometric-2.4.0/docs/source/_figures/remote_1.png diff --git a/pytorch_geometric-2.3.1/docs/source/_figures/remote_2.png b/pytorch_geometric-2.4.0/docs/source/_figures/remote_2.png similarity index 100% rename from 
pytorch_geometric-2.3.1/docs/source/_figures/remote_2.png rename to pytorch_geometric-2.4.0/docs/source/_figures/remote_2.png diff --git a/pytorch_geometric-2.3.1/docs/source/_figures/remote_3.png b/pytorch_geometric-2.4.0/docs/source/_figures/remote_3.png similarity index 100% rename from pytorch_geometric-2.3.1/docs/source/_figures/remote_3.png rename to pytorch_geometric-2.4.0/docs/source/_figures/remote_3.png diff --git a/pytorch_geometric-2.4.0/docs/source/_figures/shallow_node_embeddings.png b/pytorch_geometric-2.4.0/docs/source/_figures/shallow_node_embeddings.png new file mode 100644 index 0000000..c248e09 Binary files /dev/null and b/pytorch_geometric-2.4.0/docs/source/_figures/shallow_node_embeddings.png differ diff --git a/pytorch_geometric-2.3.1/docs/source/_figures/to_hetero.svg b/pytorch_geometric-2.4.0/docs/source/_figures/to_hetero.svg similarity index 100% rename from pytorch_geometric-2.3.1/docs/source/_figures/to_hetero.svg rename to pytorch_geometric-2.4.0/docs/source/_figures/to_hetero.svg diff --git a/pytorch_geometric-2.3.1/docs/source/_figures/to_hetero.tex b/pytorch_geometric-2.4.0/docs/source/_figures/to_hetero.tex similarity index 100% rename from pytorch_geometric-2.3.1/docs/source/_figures/to_hetero.tex rename to pytorch_geometric-2.4.0/docs/source/_figures/to_hetero.tex diff --git a/pytorch_geometric-2.3.1/docs/source/_figures/to_hetero_with_bases.svg b/pytorch_geometric-2.4.0/docs/source/_figures/to_hetero_with_bases.svg similarity index 100% rename from pytorch_geometric-2.3.1/docs/source/_figures/to_hetero_with_bases.svg rename to pytorch_geometric-2.4.0/docs/source/_figures/to_hetero_with_bases.svg diff --git a/pytorch_geometric-2.3.1/docs/source/_figures/to_hetero_with_bases.tex b/pytorch_geometric-2.4.0/docs/source/_figures/to_hetero_with_bases.tex similarity index 100% rename from pytorch_geometric-2.3.1/docs/source/_figures/to_hetero_with_bases.tex rename to pytorch_geometric-2.4.0/docs/source/_figures/to_hetero_with_bases.tex diff --git a/pytorch_geometric-2.3.1/docs/source/_figures/training_affinity.png b/pytorch_geometric-2.4.0/docs/source/_figures/training_affinity.png similarity index 100% rename from pytorch_geometric-2.3.1/docs/source/_figures/training_affinity.png rename to pytorch_geometric-2.4.0/docs/source/_figures/training_affinity.png diff --git a/pytorch_geometric-2.3.1/docs/source/_static/js/version_alert.js b/pytorch_geometric-2.4.0/docs/source/_static/js/version_alert.js similarity index 100% rename from pytorch_geometric-2.3.1/docs/source/_static/js/version_alert.js rename to pytorch_geometric-2.4.0/docs/source/_static/js/version_alert.js diff --git a/pytorch_geometric-2.4.0/docs/source/_static/thumbnails/create_dataset.png b/pytorch_geometric-2.4.0/docs/source/_static/thumbnails/create_dataset.png new file mode 100644 index 0000000..b43503d Binary files /dev/null and b/pytorch_geometric-2.4.0/docs/source/_static/thumbnails/create_dataset.png differ diff --git a/pytorch_geometric-2.4.0/docs/source/_static/thumbnails/create_gnn.png b/pytorch_geometric-2.4.0/docs/source/_static/thumbnails/create_gnn.png new file mode 100644 index 0000000..717ccf2 Binary files /dev/null and b/pytorch_geometric-2.4.0/docs/source/_static/thumbnails/create_gnn.png differ diff --git a/pytorch_geometric-2.4.0/docs/source/_static/thumbnails/explain.png b/pytorch_geometric-2.4.0/docs/source/_static/thumbnails/explain.png new file mode 100644 index 0000000..6b0869a Binary files /dev/null and b/pytorch_geometric-2.4.0/docs/source/_static/thumbnails/explain.png 
differ diff --git a/pytorch_geometric-2.4.0/docs/source/_static/thumbnails/heterogeneous.png b/pytorch_geometric-2.4.0/docs/source/_static/thumbnails/heterogeneous.png new file mode 100644 index 0000000..86c8b20 Binary files /dev/null and b/pytorch_geometric-2.4.0/docs/source/_static/thumbnails/heterogeneous.png differ diff --git a/pytorch_geometric-2.4.0/docs/source/_static/thumbnails/load_csv.png b/pytorch_geometric-2.4.0/docs/source/_static/thumbnails/load_csv.png new file mode 100644 index 0000000..a5cfe59 Binary files /dev/null and b/pytorch_geometric-2.4.0/docs/source/_static/thumbnails/load_csv.png differ diff --git a/pytorch_geometric-2.4.0/docs/source/_static/thumbnails/multi_gpu_vanilla.png b/pytorch_geometric-2.4.0/docs/source/_static/thumbnails/multi_gpu_vanilla.png new file mode 100644 index 0000000..d0f3405 Binary files /dev/null and b/pytorch_geometric-2.4.0/docs/source/_static/thumbnails/multi_gpu_vanilla.png differ diff --git a/pytorch_geometric-2.4.0/docs/source/_static/thumbnails/neighbor_loader.png b/pytorch_geometric-2.4.0/docs/source/_static/thumbnails/neighbor_loader.png new file mode 100644 index 0000000..98fe8bb Binary files /dev/null and b/pytorch_geometric-2.4.0/docs/source/_static/thumbnails/neighbor_loader.png differ diff --git a/pytorch_geometric-2.4.0/docs/source/_static/thumbnails/shallow_node_embeddings.png b/pytorch_geometric-2.4.0/docs/source/_static/thumbnails/shallow_node_embeddings.png new file mode 100644 index 0000000..fd26940 Binary files /dev/null and b/pytorch_geometric-2.4.0/docs/source/_static/thumbnails/shallow_node_embeddings.png differ diff --git a/pytorch_geometric-2.3.1/docs/source/_templates/autosummary/class.rst b/pytorch_geometric-2.4.0/docs/source/_templates/autosummary/class.rst similarity index 100% rename from pytorch_geometric-2.3.1/docs/source/_templates/autosummary/class.rst rename to pytorch_geometric-2.4.0/docs/source/_templates/autosummary/class.rst diff --git a/pytorch_geometric-2.3.1/docs/source/_templates/autosummary/inherited_class.rst b/pytorch_geometric-2.4.0/docs/source/_templates/autosummary/inherited_class.rst similarity index 100% rename from pytorch_geometric-2.3.1/docs/source/_templates/autosummary/inherited_class.rst rename to pytorch_geometric-2.4.0/docs/source/_templates/autosummary/inherited_class.rst diff --git a/pytorch_geometric-2.3.1/docs/source/_templates/autosummary/nn.rst b/pytorch_geometric-2.4.0/docs/source/_templates/autosummary/nn.rst similarity index 100% rename from pytorch_geometric-2.3.1/docs/source/_templates/autosummary/nn.rst rename to pytorch_geometric-2.4.0/docs/source/_templates/autosummary/nn.rst diff --git a/pytorch_geometric-2.3.1/docs/source/_templates/autosummary/only_class.rst b/pytorch_geometric-2.4.0/docs/source/_templates/autosummary/only_class.rst similarity index 100% rename from pytorch_geometric-2.3.1/docs/source/_templates/autosummary/only_class.rst rename to pytorch_geometric-2.4.0/docs/source/_templates/autosummary/only_class.rst diff --git a/pytorch_geometric-2.4.0/docs/source/advanced/batching.rst b/pytorch_geometric-2.4.0/docs/source/advanced/batching.rst new file mode 100644 index 0000000..881755e --- /dev/null +++ b/pytorch_geometric-2.4.0/docs/source/advanced/batching.rst @@ -0,0 +1,238 @@ +Advanced Mini-Batching +====================== + +The creation of mini-batching is crucial for letting the training of a deep learning model scale to huge amounts of data. 
+Instead of processing examples one-by-one, a mini-batch groups a set of examples into a unified representation where it can efficiently be processed in parallel.
+In the image or language domain, this procedure is typically achieved by rescaling or padding each example into a set of equally-sized shapes, and examples are then grouped in an additional dimension.
+The length of this dimension is then equal to the number of examples grouped in a mini-batch and is typically referred to as the :obj:`batch_size`.
+
+Since graphs are one of the most general data structures that can hold *any* number of nodes or edges, the two approaches described above are either not feasible or may result in a lot of unnecessary memory consumption.
+In :pyg:`PyG`, we opt for another approach to achieve parallelization across a number of examples.
+Here, adjacency matrices are stacked in a diagonal fashion (creating a giant graph that holds multiple isolated subgraphs), and node and target features are simply concatenated in the node dimension, *i.e.*
+
+.. math::
+
+    \mathbf{A} = \begin{bmatrix} \mathbf{A}_1 & & \\ & \ddots & \\ & & \mathbf{A}_n \end{bmatrix}, \qquad \mathbf{X} = \begin{bmatrix} \mathbf{X}_1 \\ \vdots \\ \mathbf{X}_n \end{bmatrix}, \qquad \mathbf{Y} = \begin{bmatrix} \mathbf{Y}_1 \\ \vdots \\ \mathbf{Y}_n \end{bmatrix}.
+
+This procedure has some crucial advantages over other batching procedures:
+
+1. GNN operators that rely on a message passing scheme do not need to be modified since messages still cannot be exchanged between two nodes that belong to different graphs.
+
+2. There is no computational or memory overhead.
+   For example, this batching procedure works completely without any padding of node or edge features.
+   Note that there is no additional memory overhead for adjacency matrices since they are saved in a sparse fashion holding only non-zero entries, *i.e.*, the edges.
+
+:pyg:`PyG` automatically takes care of batching multiple graphs into a single giant graph with the help of the :class:`torch_geometric.loader.DataLoader` class.
+Internally, :class:`~torch_geometric.loader.DataLoader` is just a regular :pytorch:`PyTorch` :class:`torch.utils.data.DataLoader` that overwrites its :func:`collate` functionality, *i.e.*, the definition of how a list of examples should be grouped together.
+Therefore, all arguments that can be passed to a :pytorch:`PyTorch` :class:`~torch.utils.data.DataLoader` can also be passed to a :pyg:`PyG` :class:`~torch_geometric.loader.DataLoader`, *e.g.*, the number of workers :obj:`num_workers`.
+
+In its most general form, the :pyg:`PyG` :class:`~torch_geometric.loader.DataLoader` will automatically increment the :obj:`edge_index` tensor by the cumulative number of nodes of all graphs that got collated before the currently processed graph, and will concatenate :obj:`edge_index` tensors (that are of shape :obj:`[2, num_edges]`) in the second dimension.
+The same is true for :obj:`face` tensors, *i.e.*, face indices in meshes.
+All other tensors will just get concatenated in the first dimension without any further increase of their values.
+
+However, there are a few special use-cases (as outlined below) where the user actively wants to modify this behavior to their own needs.
+:pyg:`PyG` allows modification to the underlying batching procedure by overwriting the :meth:`torch_geometric.data.Data.__inc__` and :meth:`torch_geometric.data.Data.__cat_dim__` functionalities.
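+To make the default behavior concrete first, here is a minimal sketch (the printed tensor assumes the toy two-graph example defined in it) of how :obj:`edge_index` gets incremented during collation:
+
+.. code-block:: python
+
+    import torch
+    from torch_geometric.data import Data
+    from torch_geometric.loader import DataLoader
+
+    edge_index = torch.tensor([
+        [0, 1, 2],
+        [1, 2, 0],
+    ])
+    data = Data(x=torch.randn(3, 8), edge_index=edge_index)  # 3 nodes.
+
+    loader = DataLoader([data, data], batch_size=2)
+    batch = next(iter(loader))
+
+    # Edges of the second graph are shifted by the 3 nodes of the first graph:
+    print(batch.edge_index)
+    >>> tensor([[0, 1, 2, 3, 4, 5],
+                [1, 2, 0, 4, 5, 3]])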
+Without any modifications, these are defined as follows in the :class:`~torch_geometric.data.Data` class:
+
+.. code-block:: python
+
+    def __inc__(self, key, value, *args, **kwargs):
+        if 'index' in key:
+            return self.num_nodes
+        else:
+            return 0
+
+    def __cat_dim__(self, key, value, *args, **kwargs):
+        if 'index' in key:
+            return 1
+        else:
+            return 0
+
+We can see that :meth:`~torch_geometric.data.Data.__inc__` defines the incremental count between two consecutive graph attributes.
+By default, :pyg:`PyG` increments attributes by the number of nodes whenever their attribute names contain the substring :obj:`index` (for historical reasons), which comes in handy for attributes such as :obj:`edge_index` or :obj:`node_index`.
+However, note that this may lead to unexpected behavior for attributes whose names contain the substring :obj:`index` but should not be incremented.
+To be safe, it is best practice to always double-check the output of batching.
+Furthermore, :meth:`~torch_geometric.data.Data.__cat_dim__` defines in which dimension graph tensors of the same attribute should be concatenated together.
+Both functions are called for each attribute stored in the :class:`~torch_geometric.data.Data` class, and get passed their specific :obj:`key` and value :obj:`item` as arguments.
+
+In what follows, we present a few use-cases where the modification of :meth:`~torch_geometric.data.Data.__inc__` and :meth:`~torch_geometric.data.Data.__cat_dim__` might be absolutely necessary.
+
+Pairs of Graphs
+---------------
+
+In case you want to store multiple graphs in a single :class:`~torch_geometric.data.Data` object, *e.g.*, for applications such as graph matching, you need to ensure correct batching behavior across all those graphs.
+For example, consider storing two graphs, a source graph :math:`\mathcal{G}_s` and a target graph :math:`\mathcal{G}_t`, in a :class:`~torch_geometric.data.Data` object, *e.g.*:
+
+.. code-block:: python
+
+    from torch_geometric.data import Data
+
+    class PairData(Data):
+        pass
+
+    data = PairData(x_s=x_s, edge_index_s=edge_index_s,  # Source graph.
+                    x_t=x_t, edge_index_t=edge_index_t)  # Target graph.
+
+In this case, :obj:`edge_index_s` should be increased by the number of nodes in the source graph :math:`\mathcal{G}_s`, *e.g.*, :obj:`x_s.size(0)`, and :obj:`edge_index_t` should be increased by the number of nodes in the target graph :math:`\mathcal{G}_t`, *e.g.*, :obj:`x_t.size(0)`:
+
+.. code-block:: python
+
+    class PairData(Data):
+        def __inc__(self, key, value, *args, **kwargs):
+            if key == 'edge_index_s':
+                return self.x_s.size(0)
+            if key == 'edge_index_t':
+                return self.x_t.size(0)
+            return super().__inc__(key, value, *args, **kwargs)
+
+We can test our :class:`PairData` batching behavior by setting up a simple test script:
+
+.. code-block:: python
+
+    import torch
+    from torch_geometric.loader import DataLoader
+
+    x_s = torch.randn(5, 16)  # 5 nodes.
+    edge_index_s = torch.tensor([
+        [0, 0, 0, 0],
+        [1, 2, 3, 4],
+    ])
+
+    x_t = torch.randn(4, 16)  # 4 nodes.
+    edge_index_t = torch.tensor([
+        [0, 0, 0],
+        [1, 2, 3],
+    ])
+
+    data = PairData(x_s=x_s, edge_index_s=edge_index_s,
+                    x_t=x_t, edge_index_t=edge_index_t)
+
+    data_list = [data, data]
+    loader = DataLoader(data_list, batch_size=2)
+    batch = next(iter(loader))
+
+    print(batch)
+    >>> PairDataBatch(x_s=[10, 16], edge_index_s=[2, 8],
+                      x_t=[8, 16], edge_index_t=[2, 6])
+
+    print(batch.edge_index_s)
+    >>> tensor([[0, 0, 0, 0, 5, 5, 5, 5],
+                [1, 2, 3, 4, 6, 7, 8, 9]])
+
+    print(batch.edge_index_t)
+    >>> tensor([[0, 0, 0, 4, 4, 4],
+                [1, 2, 3, 5, 6, 7]])
+
+Everything looks good so far!
+:obj:`edge_index_s` and :obj:`edge_index_t` get correctly batched together, even when using different numbers of nodes for :math:`\mathcal{G}_s` and :math:`\mathcal{G}_t`.
+However, the :obj:`batch` attribute (that maps each node to its respective graph) is missing since :pyg:`PyG` fails to identify the actual graph in the :class:`PairData` object.
+That is where the :obj:`follow_batch` argument of the :class:`~torch_geometric.loader.DataLoader` comes into play.
+Here, we can specify for which attributes we want to maintain the batch information:
+
+.. code-block:: python
+
+    loader = DataLoader(data_list, batch_size=2, follow_batch=['x_s', 'x_t'])
+    batch = next(iter(loader))
+
+    print(batch)
+    >>> PairDataBatch(x_s=[10, 16], edge_index_s=[2, 8], x_s_batch=[10],
+                      x_t=[8, 16], edge_index_t=[2, 6], x_t_batch=[8])
+
+    print(batch.x_s_batch)
+    >>> tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
+
+    print(batch.x_t_batch)
+    >>> tensor([0, 0, 0, 0, 1, 1, 1, 1])
+
+As one can see, :obj:`follow_batch=['x_s', 'x_t']` now successfully creates assignment vectors :obj:`x_s_batch` and :obj:`x_t_batch` for the node features :obj:`x_s` and :obj:`x_t`, respectively.
+That information can now be used to perform reduce operations, *e.g.*, global pooling, on multiple graphs in a single :class:`Batch` object.
+
+Bipartite Graphs
+----------------
+
+The adjacency matrix of a bipartite graph defines the relationship between nodes of two different node types.
+In general, the numbers of nodes of the two node types do not need to match, resulting in a non-square adjacency matrix :math:`\mathbf{A} \in \{ 0, 1 \}^{N \times M}` with potentially :math:`N \neq M`.
+In a mini-batching procedure of bipartite graphs, the source nodes of edges in :obj:`edge_index` should get increased differently than the target nodes of edges in :obj:`edge_index`.
+To achieve this, consider a bipartite graph between two node types with corresponding node features :obj:`x_s` and :obj:`x_t`, respectively:
+
+.. code-block:: python
+
+    from torch_geometric.data import Data
+
+    class BipartiteData(Data):
+        pass
+
+    data = BipartiteData(x_s=x_s, x_t=x_t, edge_index=edge_index)
+
+For a correct mini-batching procedure in bipartite graphs, we need to tell :pyg:`PyG` that it should increment source and target nodes of edges in :obj:`edge_index` independently:
+
+.. code-block:: python
+
+    class BipartiteData(Data):
+        def __inc__(self, key, value, *args, **kwargs):
+            if key == 'edge_index':
+                return torch.tensor([[self.x_s.size(0)], [self.x_t.size(0)]])
+            return super().__inc__(key, value, *args, **kwargs)
+
+Here, :obj:`edge_index[0]` (the source nodes of edges) gets incremented by :obj:`x_s.size(0)` while :obj:`edge_index[1]` (the target nodes of edges) gets incremented by :obj:`x_t.size(0)`.
+We can again test our implementation by running a simple test script:
+
+.. code-block:: python
+
+    import torch
+    from torch_geometric.loader import DataLoader
+
+    x_s = torch.randn(2, 16)  # 2 nodes.
+    x_t = torch.randn(3, 16)  # 3 nodes.
+    edge_index = torch.tensor([
+        [0, 0, 1, 1],
+        [0, 1, 1, 2],
+    ])
+
+    data = BipartiteData(x_s=x_s, x_t=x_t, edge_index=edge_index)
+
+    data_list = [data, data]
+    loader = DataLoader(data_list, batch_size=2)
+    batch = next(iter(loader))
+
+    print(batch)
+    >>> BipartiteDataBatch(x_s=[4, 16], x_t=[6, 16], edge_index=[2, 8])
+
+    print(batch.edge_index)
+    >>> tensor([[0, 0, 1, 1, 2, 2, 3, 3],
+                [0, 1, 1, 2, 3, 4, 4, 5]])
+
+Again, this is exactly the behavior we aimed for!
+
+Batching Along New Dimensions
+-----------------------------
+
+Sometimes, attributes of :obj:`data` objects should be batched by gaining a new batch dimension (as in classical mini-batching), *e.g.*, for graph-level properties or targets.
+Specifically, a list of attributes of shape :obj:`[num_features]` should be returned as :obj:`[num_examples, num_features]` rather than :obj:`[num_examples * num_features]`.
+:pyg:`PyG` achieves this by returning a concatenation dimension of :obj:`None` in :meth:`~torch_geometric.data.Data.__cat_dim__`:
+
+.. code-block:: python
+
+    import torch
+    from torch_geometric.data import Data
+    from torch_geometric.loader import DataLoader
+
+    class MyData(Data):
+        def __cat_dim__(self, key, value, *args, **kwargs):
+            if key == 'foo':
+                return None
+            return super().__cat_dim__(key, value, *args, **kwargs)
+
+    edge_index = torch.tensor([
+        [0, 1, 1, 2],
+        [1, 0, 2, 1],
+    ])
+    foo = torch.randn(16)
+
+    data = MyData(num_nodes=3, edge_index=edge_index, foo=foo)
+
+    data_list = [data, data]
+    loader = DataLoader(data_list, batch_size=2)
+    batch = next(iter(loader))
+
+    print(batch)
+    >>> MyDataBatch(num_nodes=6, edge_index=[2, 8], foo=[2, 16])
+
+As desired, :obj:`batch.foo` is now described by two dimensions: the batch dimension and the feature dimension.
diff --git a/pytorch_geometric-2.4.0/docs/source/advanced/compile.rst b/pytorch_geometric-2.4.0/docs/source/advanced/compile.rst
new file mode 100644
index 0000000..47693bf
--- /dev/null
+++ b/pytorch_geometric-2.4.0/docs/source/advanced/compile.rst
@@ -0,0 +1,164 @@
+Compiled Graph Neural Networks
+==============================
+
+:meth:`torch.compile` is the latest method to speed up your :pytorch:`PyTorch` code in :obj:`torch >= 2.0.0`!
+:meth:`torch.compile` makes PyTorch code run faster by JIT-compiling it into optimized kernels, all while requiring only minimal code changes.
+
+Under the hood, :meth:`torch.compile` captures :pytorch:`PyTorch` programs via :obj:`TorchDynamo`, canonicalizes over 2,000 :pytorch:`PyTorch` operators via :obj:`PrimTorch`, and finally generates fast code out of it across multiple accelerators and backends via the deep learning compiler :obj:`TorchInductor`.
+
+.. note::
+    See `here `__ for a general tutorial on how to leverage :meth:`torch.compile`, and `here `__ for a description of its interface.
+
+In this tutorial, we show how to optimize your custom :pyg:`PyG` model via :meth:`torch.compile`.
+
+:meth:`torch_geometric.compile`
+-------------------------------
+
+By default, :meth:`torch.compile` struggles to optimize a custom :pyg:`PyG` model since its underlying :class:`~torch_geometric.nn.conv.MessagePassing` interface is JIT-unfriendly due to its generality.
+As such, in :pyg:`PyG 2.3`, we introduce :meth:`torch_geometric.compile`, a wrapper around :meth:`torch.compile` with the same signature.
+
+:meth:`torch_geometric.compile` applies further optimizations to make :pyg:`PyG` models more compiler-friendly.
+Specifically, it:
+
+#. Temporarily disables the usage of the extension packages :obj:`torch_scatter`, :obj:`torch_sparse` and :obj:`pyg_lib` during GNN execution workflows (since these are not *yet* directly optimizable by :pytorch:`PyTorch`).
+   From :pyg:`PyG 2.3` onwards, these packages are purely optional and not required anymore for running :pyg:`PyG` models (but :obj:`pyg_lib` may be required for graph sampling routines).
+
+#. Converts all instances of :class:`~torch_geometric.nn.conv.MessagePassing` modules into their jittable instances (see :meth:`torch_geometric.nn.conv.MessagePassing.jittable`).
+
+Without these adjustments, :meth:`torch.compile` may currently fail to correctly optimize your :pyg:`PyG` model.
+We are working on fully relying on :meth:`torch.compile` for future releases.
+
+Basic Usage
+-----------
+
+Leveraging :meth:`torch_geometric.compile` is as simple as the usage of :meth:`torch.compile`.
+Once you have a :pyg:`PyG` model defined, simply wrap it with :meth:`torch_geometric.compile` to obtain its optimized version:
+
+.. code-block:: python
+
+    import torch_geometric
+    from torch_geometric.nn import GraphSAGE
+
+    model = GraphSAGE(in_channels, hidden_channels, num_layers, out_channels)
+    model = model.to(device)
+
+    model = torch_geometric.compile(model)
+
+and execute it as usual:
+
+.. code-block:: python
+
+    from torch_geometric.datasets import Planetoid
+
+    dataset = Planetoid(root, name="Cora")
+    data = dataset[0].to(device)
+
+    out = model(data.x, data.edge_index)
+
+Maximizing Performance
+----------------------
+
+The :meth:`torch.compile`/:meth:`torch_geometric.compile` method provides two important arguments to be aware of:
+
+* Most of the mini-batches observed in :pyg:`PyG` are dynamic by nature, meaning that their shape varies across different mini-batches.
+  For these scenarios, we can enforce dynamic shape tracing in :pytorch:`PyTorch` via the :obj:`dynamic=True` argument:
+
+  .. code-block:: python
+
+      torch_geometric.compile(model, dynamic=True)
+
+  With this, :pytorch:`PyTorch` will up-front attempt to generate a kernel that is as dynamic as possible to avoid recompilations when sizes change across mini-batches.
+  Note that when :obj:`dynamic` is set to :obj:`False`, :pytorch:`PyTorch` will *never* generate dynamic kernels, leading to significant slowdowns in model execution on dynamic mini-batches.
+  As such, you should omit :obj:`dynamic=True` only when graph sizes are guaranteed to never change.
+  Note that :obj:`dynamic=True` requires :pytorch:`PyTorch` :obj:`>= 2.1.0` to be installed.
+
+* In order to maximize speedup, graph breaks in the compiled model should be limited.
+  We can force compilation to raise an error upon the first graph break encountered by using the :obj:`fullgraph=True` argument:
+
+  .. code-block:: python
+
+      torch_geometric.compile(model, fullgraph=True)
+
+  It is generally a good practice to confirm that your written model does not contain any graph breaks.
+  Importantly, there exist a few operations in :pyg:`PyG` that will currently lead to graph breaks (but workarounds exist), *e.g.*:
+
+  1. :meth:`~torch_geometric.nn.pool.global_mean_pool` (and other pooling operators) perform device synchronization in case the batch size :obj:`size` is not passed, leading to a graph break.
+
+  2. :meth:`~torch_geometric.utils.remove_self_loops` and :meth:`~torch_geometric.utils.add_remaining_self_loops` mask the given :obj:`edge_index`, leading to a device synchronization to compute its final output shape.
+     As such, we recommend to augment your graph *before* inputting it into your GNN, *e.g.*, via the :class:`~torch_geometric.transforms.AddSelfLoops` or :class:`~torch_geometric.transforms.GCNNorm` transformations, and setting :obj:`add_self_loops=False`/:obj:`normalize=False` when initializing layers such as :class:`~torch_geometric.nn.conv.GCNConv`.
+
+Example Scripts
+---------------
+
+We have incorporated multiple examples in :obj:`examples/compile` that further show the practical usage of :meth:`torch_geometric.compile`:
+
+#. `Node Classification `__ via :class:`~torch_geometric.nn.models.GCN` (:obj:`dynamic=False`)
+#. `Graph Classification `__ via :class:`~torch_geometric.nn.models.GIN` (:obj:`dynamic=True`)
+
+If you notice that :meth:`~torch_geometric.compile` fails for a certain :pyg:`PyG` model, do not hesitate to reach out either on :github:`null` `GitHub `_ or :slack:`null` `Slack `_.
+We are very eager to improve :meth:`~torch_geometric.compile` support across the whole :pyg:`PyG` code base.
+
+Benchmark
+---------
+
+:meth:`torch.compile` works **fantastically well** for many :pyg:`PyG` models.
+**Overall, we observe runtime improvements of up to nearly 300%.**
+
+Specifically, we benchmark :class:`~torch_geometric.nn.models.GCN`, :class:`~torch_geometric.nn.models.GraphSAGE` and :class:`~torch_geometric.nn.models.GIN` and compare runtimes obtained from traditional eager mode and :meth:`torch_geometric.compile`.
+We use a synthetic graph with 10,000 nodes and 200,000 edges, and a hidden feature dimensionality of 64.
+We report runtimes over 500 optimization steps:
+
+.. list-table::
+   :widths: 15 15 15 15 15 15
+   :header-rows: 1
+
+   * - Model
+     - Mode
+     - Forward
+     - Backward
+     - Total
+     - Speedup
+   * - :class:`~torch_geometric.nn.models.GCN`
+     - Eager
+     - 2.6396s
+     - 2.1697s
+     - 4.8093s
+     -
+   * - :class:`~torch_geometric.nn.models.GCN`
+     - **Compiled**
+     - **1.1082s**
+     - **0.5896s**
+     - **1.6978s**
+     - **2.83x**
+   * - :class:`~torch_geometric.nn.models.GraphSAGE`
+     - Eager
+     - 1.6023s
+     - 1.6428s
+     - 3.2451s
+     -
+   * - :class:`~torch_geometric.nn.models.GraphSAGE`
+     - **Compiled**
+     - **0.7033s**
+     - **0.7465s**
+     - **1.4498s**
+     - **2.24x**
+   * - :class:`~torch_geometric.nn.models.GIN`
+     - Eager
+     - 1.6701s
+     - 1.6990s
+     - 3.3690s
+     -
+   * - :class:`~torch_geometric.nn.models.GIN`
+     - **Compiled**
+     - **0.7320s**
+     - **0.7407s**
+     - **1.4727s**
+     - **2.29x**
+
+To reproduce these results, run
+
+.. code-block:: console
+
+    python test/nn/models/test_basic_gnn.py
+
+from the root folder of your checked out :pyg:`PyG` repository from :github:`GitHub`.
diff --git a/pytorch_geometric-2.3.1/docs/source/advanced/cpu_affinity.rst b/pytorch_geometric-2.4.0/docs/source/advanced/cpu_affinity.rst
similarity index 92%
rename from pytorch_geometric-2.3.1/docs/source/advanced/cpu_affinity.rst
rename to pytorch_geometric-2.4.0/docs/source/advanced/cpu_affinity.rst
index c128aea..e4d7f72 100644
--- a/pytorch_geometric-2.3.1/docs/source/advanced/cpu_affinity.rst
+++ b/pytorch_geometric-2.4.0/docs/source/advanced/cpu_affinity.rst
@@ -16,8 +16,8 @@ The following article discusses readily available tools and environment settings
 .. note::
    Overall, CPU affinity can be a useful tool for improving the performance and predictability of certain types of applications, but one configuration does not necessarily fit all cases: it is important to carefully consider whether CPU affinity is appropriate for your use case, and to test and measure the impact of any changes you make.

-Using CPU affinity and :attr:`filter_per_worker`
-------------------------------------------------
+Using CPU affinity
+------------------

 Each :pyg:`PyG` workload can be parallelized using the :pytorch:`PyTorch` iterator class :class:`MultiProcessingDataLoaderIter`, which is automatically enabled in case :obj:`num_workers > 0` is passed to a :class:`torch.utils.data.DataLoader`.
 Under the hood, it creates :obj:`num_workers` many sub-processes that will run in parallel to the main process.
@@ -39,7 +39,6 @@ The recommended number of workers to start with lies between :obj:`[2, 4]`, and

     loader = NeighborLoader(
         data,
         num_workers=3,
-        filter_per_worker=True,
         ...,
     )
@@ -47,14 +46,11 @@ The recommended number of workers to start with lies between :obj:`[2, 4]`, and

     for batch in loader:
         pass

-It is generally adivisable to use :obj:`filter_per_worker=True` when enabling multi-process dataloaders.
+It is generally advisable to use :obj:`filter_per_worker=True` for any multi-process CPU workloads (:obj:`True` by default).
 The workers then prepare each mini-batch: first by sampling the node indices using a pre-defined sampler, and secondly filtering node and edge features according to sampled nodes and edges.
 The filtering function selects node feature vectors from the complete input :class:`~torch_geometric.data.Data` tensor loaded into DRAM.
-This is a memory-expensive call which takes a significant time of each :class:`~torch.utisl.data.DataLoader` iteration.
-By default :attr:`filter_per_worker` is set to :attr:`False`, which causes that this execution is sent back to the main process.
-However, this can cause performance issues, because the main process will not be able to process all requests efficiently, especially with larger number of workers.
 When :attr:`filter_per_worker` is set to :attr:`True`, each worker's subprocess performs the filtering within its CPU resources.
-This, main process resources are relieved and can be secured only for GNN computation.
+Hence, main process resources are relieved and can be secured only for GNN computation.

 Binding processes to physical cores
 -----------------------------------
@@ -133,7 +129,7 @@ The general guidelines for achieving the best performance with CPU affinity can

 #. Enable multi-process data loaders by setting :attr:`num_workers > 0`.
    A good estimate for :obj:`num_workers` lies in the range :obj:`[2, 4]`.
    However, for more complex datasets you might want to experiment with a larger number of workers.
-   Enable :pyg:`PyG` data loaders with :obj:`filter_per_worker=True` and use the :meth:`~torch_geometric.loader.AffinityMixin.enable_cpu_affinity` feature to affinitize :class:`~torch.utils.data.DataLoader` cores.
+   Use the :meth:`~torch_geometric.loader.AffinityMixin.enable_cpu_affinity` feature to affinitize :class:`~torch.utils.data.DataLoader` cores (see the sketch below).
 #. Bind execution to physical cores.
    Alternatively, hyperthreading can be disabled completely at a system-level.
 #. Separate the cores used for main process from the data loader workers' cores by using :obj:`numactl`, :obj:`KMP_AFFINITY` of the :obj:`libiomp5` library, or :obj:`GOMP_CPU_AFFINITY` of the :obj:`libgomp` library.
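+The first guideline can be sketched as follows (a minimal sketch, assuming a :class:`~torch_geometric.loader.NeighborLoader` named :obj:`loader` and four data loader workers pinned to cores :obj:`[0, 1, 2, 3]`; the best core assignment is workload- and machine-specific):
+
+.. code-block:: python
+
+    from torch_geometric.loader import NeighborLoader
+
+    loader = NeighborLoader(data, num_workers=4, ...)
+
+    # Pin the four data loader workers to cores 0-3, leaving the
+    # remaining cores to the main process for GNN computation.
+    with loader.enable_cpu_affinity(loader_cores=[0, 1, 2, 3]):
+        for batch in loader:
+            pass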
@@ -165,14 +161,14 @@ Three different affinity configurations are presented: .. code-block:: console - LD_PRELOAD=(path)/libjemalloc.so (path)/libiomp5.so MALLOC_CONF=oversize_threshold:1,background_thread:true,metadata_thp:auto OMP_NUM_THREADS=(N-num_workers) KMP_AFFINITY=granularity=fine,compact,1,0 KMP_BLOCKTIME=0 numactl -C --localalloc python training_benchmark.py --cpu-affinity --filter_per_worker --num-workers … + LD_PRELOAD=(path)/libjemalloc.so (path)/libiomp5.so MALLOC_CONF=oversize_threshold:1,background_thread:true,metadata_thp:auto OMP_NUM_THREADS=(N-num_workers) KMP_AFFINITY=granularity=fine,compact,1,0 KMP_BLOCKTIME=0 numactl -C --localalloc python training_benchmark.py --cpu-affinity --num-workers … * **Aff+SocketSep** - data loader process on first socket, main process on second socket, 60 threads: .. code-block:: console - LD_PRELOAD=(path)/libjemalloc.so (path)/libiomp5.so MALLOC_CONF=oversize_threshold:1,background_thread:true,metadata_thp:auto OMP_NUM_THREADS=(N-M) KMP_AFFINITY=granularity=fine,compact,1,0 KMP_BLOCKTIME=0 numactl -C -m 1 python training_benchmark.py --cpu-affinity --filter_per_worker --num-workers ... + LD_PRELOAD=(path)/libjemalloc.so (path)/libiomp5.so MALLOC_CONF=oversize_threshold:1,background_thread:true,metadata_thp:auto OMP_NUM_THREADS=(N-M) KMP_AFFINITY=granularity=fine,compact,1,0 KMP_BLOCKTIME=0 numactl -C -m 1 python training_benchmark.py --cpu-affinity --num-workers ... Training times for each model/dataset combination were obtained by taking a mean of results at a variable number of dataloader workers: :obj:`[0, 2, 4, 8, 16]` for the baseline and :obj:`[2, 4, 8, 16]` workers for each affinity configuration. Then, the affinity means were normalized with respect to the mean baseline measurement. diff --git a/pytorch_geometric-2.3.1/docs/source/advanced/graphgym.rst b/pytorch_geometric-2.4.0/docs/source/advanced/graphgym.rst similarity index 98% rename from pytorch_geometric-2.3.1/docs/source/advanced/graphgym.rst rename to pytorch_geometric-2.4.0/docs/source/advanced/graphgym.rst index 22d879e..344898f 100644 --- a/pytorch_geometric-2.3.1/docs/source/advanced/graphgym.rst +++ b/pytorch_geometric-2.4.0/docs/source/advanced/graphgym.rst @@ -107,12 +107,7 @@ To use GraphGym, you need to clone :pyg:`PyG` from :github:`GitHub`, then change bash run_batch.sh # run a batch of experiments #. **Run GraphGym with CPU backend:** - GraphGym supports CPU backend as well -- you only need to add the line :obj:`device: cpu` to the :obj:`*.yaml` file. - Here we provide an example: - - .. code-block:: bash - - bash run_single_cpu.sh # run a single experiment using CPU backend + GraphGym supports CPU backend as well -- you only need to add the line :obj:`accelerator: cpu` to the :obj:`*.yaml` file. In-Depth Usage -------------- diff --git a/pytorch_geometric-2.4.0/docs/source/advanced/hgam.rst b/pytorch_geometric-2.4.0/docs/source/advanced/hgam.rst new file mode 100644 index 0000000..f957fe0 --- /dev/null +++ b/pytorch_geometric-2.4.0/docs/source/advanced/hgam.rst @@ -0,0 +1,165 @@ +Hierarchical Neighborhood Sampling +================================== + +One of the design principles of :pyg:`PyG` is that models and data loading routines should be exchangeable to allow for flexible GNN and data loading experimentation. 
+As such, models can usually be written in a data loading agnostic fashion, independent of whether one applies full-batch or mini-batch training strategies via, *e.g.*, :class:`~torch_geometric.loader.DataLoader`, :class:`~torch_geometric.loader.NeighborLoader` or :class:`~torch_geometric.loader.ClusterLoader`.
+However, in some scenarios, this flexibility comes at the cost of performance, as the model cannot exploit special characteristics of the underlying data loading routine.
+One such limitation is that a GNN trained with the :class:`~torch_geometric.loader.NeighborLoader` routine iteratively builds representations for *all* nodes at *all* depths of the network, although nodes sampled in later hops do not contribute to the node representations of seed nodes in later GNN layers anymore, thus performing useless computation.
+
+*Hierarchical Neighborhood Sampling* or *Hierarchical Graph Adjacency Matrix (HGAM)* is a technique available in :pyg:`PyG` to eliminate this overhead and speed up training and inference in mini-batch GNNs.
+Its main idea is to progressively trim the adjacency matrix of the returned subgraph before inputting it to each GNN layer.
+It works seamlessly across several models, reducing the amount of compute necessary to generate the representations for the seed nodes of the given mini-batch.
+
+Crucially, HGAM recognizes that the computation of the final node representations is only necessary for the seed nodes (which are the real target of the batch computation).
+Thus, HGAM allows for every layer of the GNN to compute only the representations of the nodes that are necessary for that layer, leading to a reduction of the computation and a speed-up of the training process that grows with the depth of the GNN being considered.
+In practice, this is achieved by **trimming the adjacency matrix** and the various **feature matrices** as the computation proceeds throughout the GNN layers.
+This is in line with the fact that in order to compute the representation for the seed/target nodes (from which the mini-batch was built via sampling methods), the depth of the relevant neighborhood shrinks as we proceed through the layers of the GNN.
+The trimming applied by HGAM is possible as the nodes of the subgraph built via sampling are ordered according to a *Breadth First Search (BFS)* strategy, meaning that the rows and columns of the adjacency matrix refer to a node ordering that starts with the seed nodes (in any order), followed by the 1-hop sampled neighbors of the first seed node, followed by the 1-hop sampled neighbors of the second seed node, and so on.
+The BFS ordering of nodes in a mini-batch allows for incremental trimming (reduction) of the adjacency matrix of the subgraph.
+This progressive trimming can be done in a computationally convenient manner thanks to the BFS ordering, which causes the nodes more distant from the seed nodes to appear farther away in the list of ordered nodes.
+
+To support this trimming and implement it effectively, the :class:`~torch_geometric.loader.NeighborLoader` implementations in :pyg:`PyG` and :pyg:`pyg-lib` additionally return the number of nodes and edges sampled in each hop.
+This information allows for fast manipulation of the adjacency matrix, which in turn leads to a great reduction in computation.
+The :class:`~torch_geometric.loader.NeighborLoader` prepares this metadata via the dedicated attributes :obj:`num_sampled_nodes` and :obj:`num_sampled_edges`.
+It can be accessed from the :class:`~torch_geometric.data.Batch` object returned for both homogeneous and heterogeneous graphs.
+
+To sum up, HGAM is a special data structure that enables efficient message passing computation in :class:`~torch_geometric.loader.NeighborLoader` scenarios.
+HGAM is implemented in :pyg:`PyG` and can be utilized via the special :meth:`~torch_geometric.utils.trim_to_layer` functionality.
+HGAM is currently an option that :pyg:`PyG` users are free to switch on or leave off *(the current default)*.
+
+Usage
+-----
+
+Here, we show examples of how to use the HGAM functionality in combination with :class:`~torch_geometric.loader.NeighborLoader`:
+
+* **Homogeneous data example:**
+
+  .. code-block:: python
+
+      from torch_geometric.datasets import Planetoid
+      from torch_geometric.loader import NeighborLoader
+
+      data = Planetoid(path, name='Cora')[0]
+
+      loader = NeighborLoader(
+          data,
+          num_neighbors=[10] * 3,
+          batch_size=128,
+      )
+
+      batch = next(iter(loader))
+      print(batch)
+      >>> Data(x=[1883, 1433], edge_index=[2, 5441], y=[1883], train_mask=[1883],
+               val_mask=[1883], test_mask=[1883], batch_size=128,
+               num_sampled_nodes=[4], num_sampled_edges=[3])
+
+      print(batch.num_sampled_nodes)
+      >>> [128, 425, 702, 628]  # Number of sampled nodes per hop/layer.
+      print(batch.num_sampled_edges)
+      >>> [520, 2036, 2885]  # Number of sampled edges per hop/layer.
+
+* **Heterogeneous data example:**
+
+  .. code-block:: python
+
+      from torch_geometric.datasets import OGB_MAG
+      from torch_geometric.loader import NeighborLoader
+
+      data = OGB_MAG(path)[0]
+
+      loader = NeighborLoader(
+          data,
+          num_neighbors=[10] * 3,
+          batch_size=128,
+          input_nodes='paper',
+      )
+
+      batch = next(iter(loader))
+      print(batch)
+      >>> HeteroData(
+            paper={
+              x=[2275, 128],
+              num_sampled_nodes=[3],
+              batch_size=128,
+            },
+            author={
+              num_nodes=2541,
+              num_sampled_nodes=[3],
+            },
+            institution={
+              num_nodes=0,
+              num_sampled_nodes=[3],
+            },
+            field_of_study={
+              num_nodes=0,
+              num_sampled_nodes=[3],
+            },
+            (author, affiliated_with, institution)={
+              edge_index=[2, 0],
+              num_sampled_edges=[2],
+            },
+            (author, writes, paper)={
+              edge_index=[2, 3255],
+              num_sampled_edges=[2],
+            },
+            (paper, cites, paper)={
+              edge_index=[2, 2691],
+              num_sampled_edges=[2],
+            },
+            (paper, has_topic, field_of_study)={
+              edge_index=[2, 0],
+              num_sampled_edges=[2],
+            }
+          )
+
+      print(batch['paper'].num_sampled_nodes)
+      >>> [128, 508, 1598]  # Number of sampled paper nodes per hop/layer.
+
+      print(batch['author', 'writes', 'paper'].num_sampled_edges)
+      >>> [629, 2621]  # Number of sampled author<>paper edges per hop/layer.
+
+The attributes :obj:`num_sampled_nodes` and :obj:`num_sampled_edges` can be used by the :meth:`~torch_geometric.utils.trim_to_layer` function inside the GNN:
+
+.. code-block:: python
+
+    from typing import List
+
+    import torch
+    from torch import Tensor
+    from torch.nn import Linear, ModuleList
+
+    from torch_geometric.datasets import Reddit
+    from torch_geometric.loader import NeighborLoader
+    from torch_geometric.nn import SAGEConv
+    from torch_geometric.utils import trim_to_layer
+
+    dataset = Reddit(path)
+    data = dataset[0]
+    loader = NeighborLoader(data, num_neighbors=[10, 5, 5], ...)
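+
+    # `trim_to_layer` (used in the model below) consumes the per-hop metadata
+    # `num_sampled_nodes`/`num_sampled_edges` to drop the nodes and edges that
+    # can no longer influence the seed nodes at deeper layers.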
+
+    class GNN(torch.nn.Module):
+        def __init__(self, in_channels: int, hidden_channels: int,
+                     out_channels: int, num_layers: int):
+            super().__init__()
+
+            self.convs = ModuleList([SAGEConv(in_channels, hidden_channels)])
+            for _ in range(num_layers - 1):
+                self.convs.append(SAGEConv(hidden_channels, hidden_channels))
+            self.lin = Linear(hidden_channels, out_channels)
+
+        def forward(
+            self,
+            x: Tensor,
+            edge_index: Tensor,
+            num_sampled_nodes_per_hop: List[int],
+            num_sampled_edges_per_hop: List[int],
+        ) -> Tensor:
+
+            for i, conv in enumerate(self.convs):
+                # Trim edge and node information to the current layer `i`.
+                x, edge_index, _ = trim_to_layer(
+                    i, num_sampled_nodes_per_hop, num_sampled_edges_per_hop,
+                    x, edge_index)
+
+                x = conv(x, edge_index).relu()
+
+            return self.lin(x)
+
+Examples
+--------
+
+We provide full examples of HGAM in the :pyg:`PyG` :obj:`examples/` folder:
+
+* :obj:`examples/hierarchical_sampling.py`: An `example `__ to showcase the basic usage of HGAM.
+* :obj:`examples/hetero/hierarchical_sage.py`: An `example `__ of HGAM on heterogeneous graphs.
diff --git a/pytorch_geometric-2.3.1/docs/source/advanced/jit.rst b/pytorch_geometric-2.4.0/docs/source/advanced/jit.rst
similarity index 100%
rename from pytorch_geometric-2.3.1/docs/source/advanced/jit.rst
rename to pytorch_geometric-2.4.0/docs/source/advanced/jit.rst
diff --git a/pytorch_geometric-2.3.1/docs/source/advanced/remote.rst b/pytorch_geometric-2.4.0/docs/source/advanced/remote.rst
similarity index 98%
rename from pytorch_geometric-2.3.1/docs/source/advanced/remote.rst
rename to pytorch_geometric-2.4.0/docs/source/advanced/remote.rst
index bb04394..6b4bd9e 100644
--- a/pytorch_geometric-2.3.1/docs/source/advanced/remote.rst
+++ b/pytorch_geometric-2.4.0/docs/source/advanced/remote.rst
@@ -107,7 +107,8 @@ An example usage of the interface is shown below:
     assert torch.equal(row, edge_index[0])
     assert torch.equal(col, edge_index[1])

-Common implementations of the :class:`~torch_geometric.data.GraphStore` are graph databases, *e.g.*, :obj:`Neo4j`, :obj:`TigerGraph`, :obj:`ArangoDB` are all viable performant options.
+Common implementations of the :class:`~torch_geometric.data.GraphStore` are graph databases, *e.g.*, :obj:`Neo4j`, :obj:`TigerGraph`, :obj:`ArangoDB` and :obj:`Kùzu` are all viable performant options.
+We provide an example of using :pyg:`PyG` in combination with the :obj:`Kùzu` database `here `__.

 A graph sampler is tightly coupled to the given :class:`~torch_geometric.data.GraphStore`, and operates on the :class:`~torch_geometric.data.GraphStore` to produce sampled subgraphs from input nodes.
 Different sampling algorithms are implemented behind the :class:`torch_geometric.sampler.BaseSampler` interface.
diff --git a/pytorch_geometric-2.3.1/docs/source/advanced/sparse_tensor.rst b/pytorch_geometric-2.4.0/docs/source/advanced/sparse_tensor.rst
similarity index 100%
rename from pytorch_geometric-2.3.1/docs/source/advanced/sparse_tensor.rst
rename to pytorch_geometric-2.4.0/docs/source/advanced/sparse_tensor.rst
diff --git a/pytorch_geometric-2.4.0/docs/source/cheatsheet/data_cheatsheet.rst b/pytorch_geometric-2.4.0/docs/source/cheatsheet/data_cheatsheet.rst
new file mode 100644
index 0000000..a0aad56
--- /dev/null
+++ b/pytorch_geometric-2.4.0/docs/source/cheatsheet/data_cheatsheet.rst
@@ -0,0 +1,92 @@
+Dataset Cheatsheet
+==================
+
+.. note::
+
+    This dataset statistics table is a **work in progress**.
+    Please consider helping us fill in its content by providing statistics for individual datasets.
+ See `here `__ and `here `__ for examples on how to do so. + +Homogeneous Datasets +-------------------- + +.. list-table:: + :widths: 50 10 10 10 10 10 + :header-rows: 1 + + * - Name + - #graphs + - #nodes + - #edges + - #features + - #classes/#tasks +{% for cls in torch_geometric.datasets.homo_datasets %} + * - :class:`~torch_geometric.datasets.{{ cls }}` {% if torch_geometric.datasets.utils.paper_link(cls) %}(`Paper <{{ torch_geometric.datasets.utils.paper_link(cls) }}>`__){% endif %} + - {%if torch_geometric.datasets.utils.has_stats(cls) %}{{ torch_geometric.datasets.utils.get_stat(cls, '#graphs', default=1) }}{% else %}{{ torch_geometric.datasets.utils.get_stat(cls, '#graphs', default='') }}{% endif %} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#nodes', default='') }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#edges', default='') }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#features', default='') }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#classes', default='') }}{{ torch_geometric.datasets.utils.get_stat(cls, '#tasks', default='') }} + {% for child in torch_geometric.datasets.utils.get_children(cls) %} + * - └─ {{ child }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#graphs', child, default=1) }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#nodes', child, default='') }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#edges', child, default='') }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#features', child, default='') }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#classes', child, default='') }}{{ torch_geometric.datasets.utils.get_stat(cls, '#tasks', child, default='') }} + {% endfor %} +{% endfor %} + +Heterogeneous Datasets +---------------------- + +.. list-table:: + :widths: 50 30 10 10 + :header-rows: 1 + + * - Name + - #nodes/#edges + - #features + - #classes/#tasks +{% for cls in torch_geometric.datasets.hetero_datasets %} + * - :class:`~torch_geometric.datasets.{{ cls }}` {% if torch_geometric.datasets.utils.paper_link(cls) %}(`Paper <{{ torch_geometric.datasets.utils.paper_link(cls) }}>`__){% endif %} + - + - + - + {% for child in torch_geometric.datasets.utils.get_children(cls) %} + * - └─ **{{torch_geometric.datasets.utils.get_type(child)}} Type**: {{ child }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#nodes/#edges', child, default='') }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#features', child, default='') }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#classes', child, default='') }}{{ torch_geometric.datasets.utils.get_stat(cls, '#tasks', child, default='') }} + {% endfor %} +{% endfor %} + +Synthetic Datasets +------------------ + +.. 
list-table:: + :widths: 50 10 10 10 10 10 + :header-rows: 1 + + * - Name + - #graphs + - #nodes + - #edges + - #features + - #classes/#tasks +{% for cls in torch_geometric.datasets.synthetic_datasets %} + * - :class:`~torch_geometric.datasets.{{ cls }}` {% if torch_geometric.datasets.utils.paper_link(cls) %}(`Paper <{{ torch_geometric.datasets.utils.paper_link(cls) }}>`__){% endif %} + - {%if torch_geometric.datasets.utils.has_stats(cls) %}{{ torch_geometric.datasets.utils.get_stat(cls, '#graphs', default=1) }}{% else %}{{ torch_geometric.datasets.utils.get_stat(cls, '#graphs', default='') }}{% endif %} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#nodes', default='') }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#edges', default='') }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#features', default='') }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#classes', default='') }}{{ torch_geometric.datasets.utils.get_stat(cls, '#tasks', default='') }} + {% for child in torch_geometric.datasets.utils.get_children(cls) %} + * - └─ {{ child }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#graphs', child, default=1) }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#nodes', child, default='') }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#edges', child, default='') }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#features', child, default='') }} + - {{ torch_geometric.datasets.utils.get_stat(cls, '#classes', child, default='') }}{{ torch_geometric.datasets.utils.get_stat(cls, '#tasks', child, default='') }} + {% endfor %} +{% endfor %} diff --git a/pytorch_geometric-2.3.1/docs/source/cheatsheet/gnn_cheatsheet.rst b/pytorch_geometric-2.4.0/docs/source/cheatsheet/gnn_cheatsheet.rst similarity index 86% rename from pytorch_geometric-2.3.1/docs/source/cheatsheet/gnn_cheatsheet.rst rename to pytorch_geometric-2.4.0/docs/source/cheatsheet/gnn_cheatsheet.rst index 6d44692..646aa49 100644 --- a/pytorch_geometric-2.3.1/docs/source/cheatsheet/gnn_cheatsheet.rst +++ b/pytorch_geometric-2.4.0/docs/source/cheatsheet/gnn_cheatsheet.rst @@ -31,7 +31,7 @@ Graph Neural Network Operators {% if not torch_geometric.nn.conv.utils.processes_heterogeneous_graphs(cls) and not torch_geometric.nn.conv.utils.processes_hypergraphs(cls) and not torch_geometric.nn.conv.utils.processes_point_clouds(cls) %} - * - :class:`~torch_geometric.nn.conv.{{ cls }}` (`Paper <{{ torch_geometric.nn.conv.utils.paper_link(cls) }}>`__) + * - :class:`~torch_geometric.nn.conv.{{ cls }}` {% if torch_geometric.nn.conv.utils.paper_link(cls) %}(`Paper <{{ torch_geometric.nn.conv.utils.paper_link(cls) }}>`__){% endif %} - {% if torch_geometric.nn.conv.utils.supports_sparse_tensor(cls) %}✓{% endif %} - {% if torch_geometric.nn.conv.utils.supports_edge_weights(cls) %}✓{% endif %} - {% if torch_geometric.nn.conv.utils.supports_edge_features(cls) %}✓{% endif %} @@ -57,7 +57,7 @@ Heterogeneous Graph Neural Network Operators - lazy {% for cls in torch_geometric.nn.conv.classes[1:] %} {% if torch_geometric.nn.conv.utils.processes_heterogeneous_graphs(cls) %} - * - :class:`~torch_geometric.nn.conv.{{ cls }}` (`Paper <{{ torch_geometric.nn.conv.utils.paper_link(cls) }}>`__) + * - :class:`~torch_geometric.nn.conv.{{ cls }}` {% if torch_geometric.nn.conv.utils.paper_link(cls) %}(`Paper <{{ torch_geometric.nn.conv.utils.paper_link(cls) }}>`__){% endif %} - {% if torch_geometric.nn.conv.utils.supports_sparse_tensor(cls) %}✓{% endif %} - {% if torch_geometric.nn.conv.utils.supports_edge_weights(cls) 
%}✓{% endif %} - {% if torch_geometric.nn.conv.utils.supports_edge_features(cls) %}✓{% endif %} @@ -83,7 +83,7 @@ Hypergraph Neural Network Operators - lazy {% for cls in torch_geometric.nn.conv.classes[1:] %} {% if torch_geometric.nn.conv.utils.processes_hypergraphs(cls) %} - * - :class:`~torch_geometric.nn.conv.{{ cls }}` (`Paper <{{ torch_geometric.nn.conv.utils.paper_link(cls) }}>`__) + * - :class:`~torch_geometric.nn.conv.{{ cls }}` {% if torch_geometric.nn.conv.utils.paper_link(cls) %}(`Paper <{{ torch_geometric.nn.conv.utils.paper_link(cls) }}>`__){% endif %} - {% if torch_geometric.nn.conv.utils.supports_sparse_tensor(cls) %}✓{% endif %} - {% if torch_geometric.nn.conv.utils.supports_edge_weights(cls) %}✓{% endif %} - {% if torch_geometric.nn.conv.utils.supports_edge_features(cls) %}✓{% endif %} @@ -105,7 +105,7 @@ Point Cloud Neural Network Operators - lazy {% for cls in torch_geometric.nn.conv.classes[1:] %} {% if torch_geometric.nn.conv.utils.processes_point_clouds(cls) %} - * - :class:`~torch_geometric.nn.conv.{{ cls }}` (`Paper <{{ torch_geometric.nn.conv.utils.paper_link(cls) }}>`__) + * - :class:`~torch_geometric.nn.conv.{{ cls }}` {% if torch_geometric.nn.conv.utils.paper_link(cls) %}(`Paper <{{ torch_geometric.nn.conv.utils.paper_link(cls) }}>`__){% endif %} - {% if torch_geometric.nn.conv.utils.supports_bipartite_graphs(cls) %}✓{% endif %} - {% if torch_geometric.nn.conv.utils.supports_lazy_initialization(cls) %}✓{% endif %} {% endif %} diff --git a/pytorch_geometric-2.4.0/docs/source/conf.py b/pytorch_geometric-2.4.0/docs/source/conf.py new file mode 100644 index 0000000..6abc74a --- /dev/null +++ b/pytorch_geometric-2.4.0/docs/source/conf.py @@ -0,0 +1,66 @@ +import datetime +import os.path as osp +import sys + +import pyg_sphinx_theme + +import torch_geometric + +author = 'PyG Team' +project = 'pytorch_geometric' +version = torch_geometric.__version__ +copyright = f'{datetime.datetime.now().year}, {author}' + +sys.path.append(osp.join(osp.dirname(pyg_sphinx_theme.__file__), 'extension')) + +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.autosummary', + 'sphinx.ext.intersphinx', + 'sphinx.ext.mathjax', + 'sphinx.ext.napoleon', + 'sphinx.ext.viewcode', + 'nbsphinx', + 'pyg', +] + +html_theme = 'pyg_sphinx_theme' +html_logo = ('https://raw.githubusercontent.com/pyg-team/pyg_sphinx_theme/' + 'master/pyg_sphinx_theme/static/img/pyg_logo.png') +html_favicon = ('https://raw.githubusercontent.com/pyg-team/pyg_sphinx_theme/' + 'master/pyg_sphinx_theme/static/img/favicon.png') +html_static_path = ['_static'] +templates_path = ['_templates'] + +add_module_names = False +autodoc_member_order = 'bysource' + +suppress_warnings = ['autodoc.import_object'] + +intersphinx_mapping = { + 'python': ('https://docs.python.org/', None), + 'numpy': ('http://docs.scipy.org/doc/numpy', None), + 'pandas': ('http://pandas.pydata.org/pandas-docs/dev', None), + 'torch': ('https://pytorch.org/docs/master', None), +} + +nbsphinx_thumbnails = { + 'tutorial/create_gnn': '_static/thumbnails/create_gnn.png', + 'tutorial/heterogeneous': '_static/thumbnails/heterogeneous.png', + 'tutorial/create_dataset': '_static/thumbnails/create_dataset.png', + 'tutorial/load_csv': '_static/thumbnails/load_csv.png', + 'tutorial/neighbor_loader': '_static/thumbnails/neighbor_loader.png', + 'tutorial/explain': '_static/thumbnails/explain.png', + 'tutorial/shallow_node_embeddings': + '_static/thumbnails/shallow_node_embeddings.png', + 'tutorial/multi_gpu_vanilla': '_static/thumbnails/multi_gpu_vanilla.png', +} + + 
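+# The `source-read` hook registered below renders Jinja templates embedded in
+# the reStructuredText sources (e.g., the dataset and GNN cheatsheets),
+# exposing the `torch_geometric` module to the template context.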
+def setup(app): + def rst_jinja_render(app, _, source): + rst_context = {'torch_geometric': torch_geometric} + source[0] = app.builder.templates.render_string(source[0], rst_context) + + app.connect('source-read', rst_jinja_render) + app.add_js_file('js/version_alert.js') diff --git a/pytorch_geometric-2.3.1/docs/source/external/resources.rst b/pytorch_geometric-2.4.0/docs/source/external/resources.rst similarity index 90% rename from pytorch_geometric-2.3.1/docs/source/external/resources.rst rename to pytorch_geometric-2.4.0/docs/source/external/resources.rst index bd437e3..0443165 100644 --- a/pytorch_geometric-2.3.1/docs/source/external/resources.rst +++ b/pytorch_geometric-2.4.0/docs/source/external/resources.rst @@ -38,3 +38,7 @@ External Resources * Amitoz Azad: **Primal-Dual Algorithm for Total Variation Processing on Graphs** [`Jupyter `__] * Manan Goel: **Recommending Amazon Products using Graph Neural Networks in** :pyg:`null` **PyTorch Geometric** [:wandb:`null` `W&B Report `__] + +* Kùzu: **Remote Backend for** :pyg:`null` **PyTorch Geometric** [:colab:`null` `Colab `__] + +* Aniket Saxena: **Graph Neural Networks-based Node and Graph Classification, and Explanation App using** :pyg:`null` **PyTorch Geometric** [`Website `__, :github:`null` `GitHub `__] diff --git a/pytorch_geometric-2.3.1/docs/source/get_started/colabs.rst b/pytorch_geometric-2.4.0/docs/source/get_started/colabs.rst similarity index 98% rename from pytorch_geometric-2.3.1/docs/source/get_started/colabs.rst rename to pytorch_geometric-2.4.0/docs/source/get_started/colabs.rst index 46a20ac..9ad7726 100644 --- a/pytorch_geometric-2.3.1/docs/source/get_started/colabs.rst +++ b/pytorch_geometric-2.4.0/docs/source/get_started/colabs.rst @@ -16,6 +16,7 @@ We have prepared a list of :colab:`Colab` notebooks that practically introduces 8. `Node Classification Instrumented with `__ :wandb:`null` `Weights&Biases `__ 9. `Graph Classification Instrumented with `__ :wandb:`null` `Weights&Biases `__ 10. `Link Prediction on MovieLens `__ +11. `Link Regression on MovieLens `__ All :colab:`Colab` notebooks are released under the MIT license. diff --git a/pytorch_geometric-2.3.1/docs/source/get_started/introduction.rst b/pytorch_geometric-2.4.0/docs/source/get_started/introduction.rst similarity index 98% rename from pytorch_geometric-2.3.1/docs/source/get_started/introduction.rst rename to pytorch_geometric-2.4.0/docs/source/get_started/introduction.rst index b4ccb8e..c26629d 100644 --- a/pytorch_geometric-2.3.1/docs/source/get_started/introduction.rst +++ b/pytorch_geometric-2.4.0/docs/source/get_started/introduction.rst @@ -3,7 +3,7 @@ Introduction by Example We shortly introduce the fundamental concepts of :pyg:`PyG` through self-contained examples. -For an introduction to Graph Machine Learning, we refer the interested reader to the :stanford:`null` `Stanford CS22W: Machine Learning with Graphs `__ lectures. +For an introduction to Graph Machine Learning, we refer the interested reader to the :stanford:`null` `Stanford CS224W: Machine Learning with Graphs `__ lectures. For an interactive introduction to :pyg:`PyG`, we recommend our carefully curated :colab:`null` `Google Colab `__ notebooks. At its core, :pyg:`PyG` provides the following main features: @@ -86,7 +86,7 @@ Besides holding a number of node-level, edge-level or graph-level attributes, :c .. 
code-block:: python - print(data.keys) + print(data.keys()) >>> ['x', 'edge_index'] print(data['x']) @@ -392,7 +392,7 @@ Now let's implement a two-layer GCN: return F.log_softmax(x, dim=1) The constructor defines two :class:`~torch_geometric.nn.conv.GCNConv` layers which get called in the forward pass of our network. -Note that the non-linearity is not integrated in the :obj:`conv` calls and hence needs to be applied afterwards (something which is consistent accross all operators in :pyg:`PyG`). +Note that the non-linearity is not integrated in the :obj:`conv` calls and hence needs to be applied afterwards (something which is consistent across all operators in :pyg:`PyG`). Here, we chose to use ReLU as our intermediate non-linearity and finally output a softmax distribution over the number of classes. Let's train this model on the training nodes for 200 epochs: diff --git a/pytorch_geometric-2.3.1/docs/source/index.rst b/pytorch_geometric-2.4.0/docs/source/index.rst similarity index 91% rename from pytorch_geometric-2.3.1/docs/source/index.rst rename to pytorch_geometric-2.4.0/docs/source/index.rst index 5c18cbd..7f231c7 100644 --- a/pytorch_geometric-2.3.1/docs/source/index.rst +++ b/pytorch_geometric-2.4.0/docs/source/index.rst @@ -6,7 +6,7 @@ PyG Documentation :pyg:`null` **PyG** *(PyTorch Geometric)* is a library built upon :pytorch:`null` `PyTorch `_ to easily write and train Graph Neural Networks (GNNs) for a wide range of applications related to structured data. It consists of various methods for deep learning on graphs and other irregular structures, also known as `geometric deep learning `_, from a variety of published papers. -In addition, it consists of easy-to-use mini-batch loaders for operating on many small and single giant graphs, `multi GPU-support `_, `torch.compile `_ support, `DataPipe `_ support, a large number of common benchmark datasets (based on simple interfaces to create your own), the `GraphGym `__ experiment manager, and helpful transforms, both for learning on arbitrary graphs as well as on 3D meshes or point clouds. +In addition, it consists of easy-to-use mini-batch loaders for operating on many small and single giant graphs, `multi GPU-support `_, `torch.compile `_ support, `DataPipe `_ support, a large number of common benchmark datasets (based on simple interfaces to create your own), the `GraphGym `__ experiment manager, and helpful transforms, both for learning on arbitrary graphs as well as on 3D meshes or point clouds. .. slack_button:: @@ -27,12 +27,10 @@ In addition, it consists of easy-to-use mini-batch loaders for operating on many :maxdepth: 1 :caption: Tutorials - tutorial/create_gnn - tutorial/create_dataset - tutorial/heterogeneous - tutorial/load_csv - tutorial/explain - tutorial/compile + tutorial/gnn_design + tutorial/dataset + tutorial/application + tutorial/multi_gpu .. 
toctree:: :maxdepth: 1 @@ -40,6 +38,8 @@ In addition, it consists of easy-to-use mini-batch loaders for operating on many advanced/batching advanced/sparse_tensor + advanced/hgam + advanced/compile advanced/jit advanced/remote advanced/graphgym diff --git a/pytorch_geometric-2.3.1/docs/source/install/installation.rst b/pytorch_geometric-2.4.0/docs/source/install/installation.rst similarity index 85% rename from pytorch_geometric-2.3.1/docs/source/install/installation.rst rename to pytorch_geometric-2.4.0/docs/source/install/installation.rst index 38b63dd..56da2f7 100644 --- a/pytorch_geometric-2.3.1/docs/source/install/installation.rst +++ b/pytorch_geometric-2.4.0/docs/source/install/installation.rst @@ -1,7 +1,7 @@ Installation ============ -:pyg:`PyG` is available for Python 3.7 to Python 3.11. +:pyg:`PyG` is available for Python 3.8 to Python 3.11. .. note:: We do not recommend installation as a root user on your system Python. @@ -18,7 +18,7 @@ Installation via Anaconda You can now install :pyg:`PyG` via `Anaconda `_ for all major OS, :pytorch:`PyTorch` and CUDA combinations 🤗 If you have not yet installed :pytorch:`PyTorch`, install it via :obj:`conda` as described in its `official documentation `_. -Given that you have :pytorch:`PyTorch` installed (:obj:`>=1.8.0`), simply run +Given that you have :pytorch:`PyTorch` installed (:obj:`>=1.11.0`), simply run .. code-block:: none @@ -31,12 +31,14 @@ Installation via PyPi --------------------- From :pyg:`null` **PyG 2.3** onwards, you can install and use :pyg:`PyG` **without any external library** required except for :pytorch:`PyTorch`. -For this, simply run +For this, simply run: .. code-block:: none pip install torch_geometric +PyG 2.3 requires that at least PyTorch 1.11 is installed. + Additional Libraries -------------------- @@ -48,7 +50,7 @@ If you want to utilize the full set of features from :pyg:`PyG`, there exists se * `torch-cluster `__: Graph clustering routines * `torch-spline-conv `__: :class:`~torch_geometric.nn.conv.SplineConv` support -These packages come with their own CPU and GPU kernel implementations based on the :pytorch:`null` `PyTorch C++/CUDA extension interface `_. +These packages come with their own CPU and GPU kernel implementations based on the :pytorch:`null` `PyTorch C++/CUDA/hip(ROCm) extension interface `_. For a basic usage of :pyg:`PyG`, these dependencies are **fully optional**. We recommend to start with a minimal installation, and install additional dependencies once you start to actually need them. @@ -61,19 +63,19 @@ For ease of installation of these extensions, we provide :obj:`pip` wheels for t Wheels are currently not available for M1/M2/M3 macs. Please install the extension packages `from source `__. -#. Ensure that at least :pytorch:`PyTorch` 1.12.0 is installed: +#. Ensure that at least :pytorch:`PyTorch` 1.11.0 is installed: .. code-block:: none python -c "import torch; print(torch.__version__)" - >>> 2.0.0 + >>> 2.1.0 #. Find the CUDA version :pytorch:`PyTorch` was installed with: .. code-block:: none python -c "import torch; print(torch.version.cuda)" - >>> 11.7 + >>> 11.8 #. 
Install the relevant packages: @@ -83,25 +85,29 @@ For ease of installation of these extensions, we provide :obj:`pip` wheels for t where :obj:`${TORCH}` and :obj:`${CUDA}` should be replaced by the specific :pytorch:`PyTorch` and CUDA versions, respectively: + * :pytorch:`PyTorch` 2.1: :obj:`${TORCH}=2.1.0` and :obj:`${CUDA}=cpu|cu118|cu121` * :pytorch:`PyTorch` 2.0: :obj:`${TORCH}=2.0.0` and :obj:`${CUDA}=cpu|cu117|cu118` * :pytorch:`PyTorch` 1.13: :obj:`${TORCH}=1.13.0` and :obj:`${CUDA}=cpu|cu116|cu117` - For example, for :pytorch:`PyTorch` 2.0.* and CUDA 11.7, type: + For example, for :pytorch:`PyTorch` 2.1.* and CUDA 12.1, type: .. code-block:: none - pip install pyg_lib torch_scatter torch_sparse torch_cluster torch_spline_conv -f https://data.pyg.org/whl/torch-2.0.0+cu117.html + pip install pyg_lib torch_scatter torch_sparse torch_cluster torch_spline_conv -f https://data.pyg.org/whl/torch-2.1.0+cu121.html - For :pytorch:`PyTorch` 1.13.* and CUDA 11.6, type: + For example, for :pytorch:`PyTorch` 2.0.* and CUDA 11.8, type: .. code-block:: none - pip install pyg_lib torch_scatter torch_sparse torch_cluster torch_spline_conv -f https://data.pyg.org/whl/torch-1.13.0+cu116.html + pip install pyg_lib torch_scatter torch_sparse torch_cluster torch_spline_conv -f https://data.pyg.org/whl/torch-2.0.0+cu118.html -**Note:** Binaries of older versions are also provided for :pytorch:`PyTorch` 1.4.0, 1.5.0, 1.6.0, 1.7.0, 1.7.1, 1.8.0, 1.8.1, 1.9.0, 1.10.0, 1.10.1, 1.10.2, 1.11.0, 1.12.0 and 1.12.1 (following the same procedure). +**Note:** Binaries of older versions are also provided for :pytorch:`PyTorch` 1.4.0, 1.5.0, 1.6.0, 1.7.0/1.7.1, 1.8.0/1.8.1, 1.9.0, 1.10.0/1.10.1/1.10.2, 1.11.0, 1.12.0/1.12.1 and 1.13.0/1.13.1 (following the same procedure). **For older versions, you need to explicitly specify the latest supported version number** or install via :obj:`pip install --no-index` in order to prevent a manual installation from source. You can look up the latest supported version number `here `__. +**ROCm:** The external `pyg-rocm-build repository `__ provides wheels and detailed instructions on how to install :pyg:`PyG` for ROCm. +If you have any questions about it, please open an issue `here `__. + Installation from Source ~~~~~~~~~~~~~~~~~~~~~~~~ @@ -145,17 +151,17 @@ In case a specific version is not supported by `our wheels >> 11.6 + >>> 11.8 #. Ensure that :pytorch:`PyTorch` and system CUDA versions match: .. code-block:: none python -c "import torch; print(torch.version.cuda)" - >>> 11.6 + >>> 11.8 nvcc --version - >>> 11.6 + >>> 11.8 #. 
Install the relevant packages:
diff --git a/pytorch_geometric-2.3.1/docs/source/install/quick-start.html b/pytorch_geometric-2.4.0/docs/source/install/quick-start.html
similarity index 87%
rename from pytorch_geometric-2.3.1/docs/source/install/quick-start.html
rename to pytorch_geometric-2.4.0/docs/source/install/quick-start.html
index 570383d..b9af902 100644
--- a/pytorch_geometric-2.3.1/docs/source/install/quick-start.html
+++ b/pytorch_geometric-2.4.0/docs/source/install/quick-start.html
@@ -75,8 +75,8 @@
[hunk content lost during extraction: the underlying HTML markup was stripped, leaving only bare diff markers and the deleted line below]
-__PLOT_DIV_SPEC_GOES_HERE__
diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/examples.css b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/examples.css
deleted file mode 100644
index d63816a..0000000
--- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/examples.css
+++ /dev/null
@@ -1,146 +0,0 @@
-* { padding: 0; margin: 0; vertical-align: top; }
-
-body {
-    background: url(background.png) repeat-x;
-    font: 18px "proxima-nova", Helvetica, Arial, sans-serif;
-    line-height: 1.5;
-}
-
-a { color: #069; }
-a:hover { color: #28b; }
-
-h2 {
-    margin-top: 15px;
-    font: normal 32px "omnes-pro", Helvetica, Arial, sans-serif;
-}
-
-h3 {
-    margin-left: 30px;
-    font: normal 26px "omnes-pro", Helvetica, Arial, sans-serif;
-    color: #666;
-}
-
-p {
-    margin-top: 10px;
-}
-
-button {
-    font-size: 18px;
-    padding: 1px 7px;
-}
-
-input {
-    font-size: 18px;
-}
-
-input[type=checkbox] {
-    margin: 7px;
-}
-
-#header {
-    position: relative;
-    width: 900px;
-    margin: auto;
-}
-
-#header h2 {
-    margin-left: 10px;
-    vertical-align: middle;
-    font-size: 42px;
-    font-weight: bold;
-    text-decoration: none;
-    color: #000;
-}
-
-#content {
-    width: 880px;
-    margin: 0 auto;
-    padding: 10px;
-}
-
-#footer {
-    margin-top: 25px;
-    margin-bottom: 10px;
-    text-align: center;
-    font-size: 12px;
-    color: #999;
-}
-
-.demo-container {
-    box-sizing: border-box;
-    width: 850px;
-    height: 450px;
-    padding: 20px 15px 15px 15px;
-    margin: 15px auto 30px auto;
-    border: 1px solid #ddd;
-    background: #fff;
-    background: linear-gradient(#f6f6f6 0, #fff 50px);
-    background: -o-linear-gradient(#f6f6f6 0, #fff 50px);
-    background: -ms-linear-gradient(#f6f6f6 0, #fff 50px);
-    background: -moz-linear-gradient(#f6f6f6 0, #fff 50px);
-    background: -webkit-linear-gradient(#f6f6f6 0, #fff 50px);
-    box-shadow: 0 3px 10px rgba(0,0,0,0.15);
-    -o-box-shadow: 0 3px 10px rgba(0,0,0,0.1);
-    -ms-box-shadow: 0 3px 10px rgba(0,0,0,0.1);
-    -moz-box-shadow: 0 3px 10px rgba(0,0,0,0.1);
-    -webkit-box-shadow: 0 3px 10px rgba(0,0,0,0.1);
-    -webkit-tap-highlight-color: rgba(0,0,0,0);
-    -webkit-tap-highlight-color: transparent;
-    -webkit-touch-callout: none;
-    -webkit-user-select: none;
-    -khtml-user-select: none;
-    -moz-user-select: none;
-    -ms-user-select: none;
-    user-select: none;
-}
-
-.demo-placeholder {
-    width: 100%;
-    height: 100%;
-    font-size: 14px;
-}
-
-fieldset {
-    display: block;
-    -webkit-margin-start: 2px;
-    -webkit-margin-end: 2px;
-    -webkit-padding-before: 0.35em;
-    -webkit-padding-start: 0.75em;
-    -webkit-padding-end: 0.75em;
-    -webkit-padding-after: 0.625em;
-    min-width: -webkit-min-content;
-    border-width: 2px;
-    border-style: groove;
-    border-color: threedface;
-    border-image: initial;
-    padding: 10px;
-}
-
-.legend {
-    display: block;
-    -webkit-padding-start: 2px;
-    -webkit-padding-end: 2px;
-    border-width: initial;
-    border-style: none;
-    border-color: initial;
-    border-image: initial;
-    padding-left: 10px;
- padding-right: 10px; - padding-top: 10px; - padding-bottom: 10px; -} - -.legendLayer .background { - fill: rgba(255, 255, 255, 0.85); - stroke: rgba(0, 0, 0, 0.85); - stroke-width: 1; -} - -input[type="radio"] { - margin-top: -1px; - vertical-align: middle; -} - -.tickLabel { - line-height: 1.1; -} diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.canvaswrapper.js b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.canvaswrapper.js deleted file mode 100644 index 69eb0f3..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.canvaswrapper.js +++ /dev/null @@ -1,538 +0,0 @@ -/** ## jquery.flot.canvaswrapper - -This plugin contains the function for creating and manipulating both the canvas -layers and svg layers. - -The Canvas object is a wrapper around an HTML5 canvas tag. -The constructor Canvas(cls, container) takes as parameters cls, -the list of classes to apply to the canvas adnd the containter, -element onto which to append the canvas. The canvas operations -don't work unless the canvas is attached to the DOM. - -### jquery.canvaswrapper.js API functions -*/ - -(function($) { - var Canvas = function(cls, container) { - var element = container.getElementsByClassName(cls)[0]; - - if (!element) { - element = document.createElement('canvas'); - element.className = cls; - element.style.direction = 'ltr'; - element.style.position = 'absolute'; - element.style.left = '0px'; - element.style.top = '0px'; - - container.appendChild(element); - - // If HTML5 Canvas isn't available, throw - - if (!element.getContext) { - throw new Error('Canvas is not available.'); - } - } - - this.element = element; - - var context = this.context = element.getContext('2d'); - this.pixelRatio = $.plot.browser.getPixelRatio(context); - - // Size the canvas to match the internal dimensions of its container - - var box = container.getBoundingClientRect(); - this.resize(box.width, box.height); - - // Collection of HTML div layers for text overlaid onto the canvas - - this.SVGContainer = null; - this.SVG = {}; - - // Cache of text fragments and metrics, so we can avoid expensively - // re-calculating them when the plot is re-rendered in a loop. - - this._textCache = {}; - } - - /** - - resize(width, height) - - Resizes the canvas to the given dimensions. - The width represents the new width of the canvas, meanwhile the height - is the new height of the canvas, both of them in pixels. - */ - - Canvas.prototype.resize = function(width, height) { - var minSize = 10; - width = width < minSize ? minSize : width; - height = height < minSize ? minSize : height; - - var element = this.element, - context = this.context, - pixelRatio = this.pixelRatio; - - // Resize the canvas, increasing its density based on the display's - // pixel ratio; basically giving it more pixels without increasing the - // size of its element, to take advantage of the fact that retina - // displays have that many more pixels in the same advertised space. - - // Resizing should reset the state (excanvas seems to be buggy though) - - if (this.width !== width) { - element.width = width * pixelRatio; - element.style.width = width + 'px'; - this.width = width; - } - - if (this.height !== height) { - element.height = height * pixelRatio; - element.style.height = height + 'px'; - this.height = height; - } - - // Save the context, so we can reset in case we get replotted. 
The - // restore ensure that we're really back at the initial state, and - // should be safe even if we haven't saved the initial state yet. - - context.restore(); - context.save(); - - // Scale the coordinate space to match the display density; so even though we - // may have twice as many pixels, we still want lines and other drawing to - // appear at the same size; the extra pixels will just make them crisper. - - context.scale(pixelRatio, pixelRatio); - }; - - /** - - clear() - - Clears the entire canvas area, not including any overlaid HTML text - */ - Canvas.prototype.clear = function() { - this.context.clearRect(0, 0, this.width, this.height); - }; - - /** - - render() - - Finishes rendering the canvas, including managing the text overlay. - */ - Canvas.prototype.render = function() { - var cache = this._textCache; - - // For each text layer, add elements marked as active that haven't - // already been rendered, and remove those that are no longer active. - - for (var layerKey in cache) { - if (hasOwnProperty.call(cache, layerKey)) { - var layer = this.getSVGLayer(layerKey), - layerCache = cache[layerKey]; - - var display = layer.style.display; - layer.style.display = 'none'; - - for (var styleKey in layerCache) { - if (hasOwnProperty.call(layerCache, styleKey)) { - var styleCache = layerCache[styleKey]; - for (var key in styleCache) { - if (hasOwnProperty.call(styleCache, key)) { - var val = styleCache[key], - positions = val.positions; - - for (var i = 0, position; positions[i]; i++) { - position = positions[i]; - if (position.active) { - if (!position.rendered) { - layer.appendChild(position.element); - position.rendered = true; - } - } else { - positions.splice(i--, 1); - if (position.rendered) { - while (position.element.firstChild) { - position.element.removeChild(position.element.firstChild); - } - position.element.parentNode.removeChild(position.element); - } - } - } - - if (positions.length === 0) { - if (val.measured) { - val.measured = false; - } else { - delete styleCache[key]; - } - } - } - } - } - } - - layer.style.display = display; - } - } - }; - - /** - - getSVGLayer(classes) - - Creates (if necessary) and returns the SVG overlay container. - The classes string represents the string of space-separated CSS classes - used to uniquely identify the text layer. It return the svg-layer div. 
- */ - Canvas.prototype.getSVGLayer = function(classes) { - var layer = this.SVG[classes]; - - // Create the SVG layer if it doesn't exist - - if (!layer) { - // Create the svg layer container, if it doesn't exist - - var svgElement; - - if (!this.SVGContainer) { - this.SVGContainer = document.createElement('div'); - this.SVGContainer.className = 'flot-svg'; - this.SVGContainer.style.position = 'absolute'; - this.SVGContainer.style.top = '0px'; - this.SVGContainer.style.left = '0px'; - this.SVGContainer.style.bottom = '0px'; - this.SVGContainer.style.right = '0px'; - this.SVGContainer.style.pointerEvents = 'none'; - this.element.parentNode.appendChild(this.SVGContainer); - - svgElement = document.createElementNS('http://www.w3.org/2000/svg', 'svg'); - svgElement.style.width = '100%'; - svgElement.style.height = '100%'; - - this.SVGContainer.appendChild(svgElement); - } else { - svgElement = this.SVGContainer.firstChild; - } - - layer = document.createElementNS('http://www.w3.org/2000/svg', 'g'); - layer.setAttribute('class', classes); - layer.style.position = 'absolute'; - layer.style.top = '0px'; - layer.style.left = '0px'; - layer.style.bottom = '0px'; - layer.style.right = '0px'; - svgElement.appendChild(layer); - this.SVG[classes] = layer; - } - - return layer; - }; - - /** - - getTextInfo(layer, text, font, angle, width) - - Creates (if necessary) and returns a text info object. - The object looks like this: - ```js - { - width //Width of the text's wrapper div. - height //Height of the text's wrapper div. - element //The HTML div containing the text. - positions //Array of positions at which this text is drawn. - } - ``` - The positions array contains objects that look like this: - ```js - { - active //Flag indicating whether the text should be visible. - rendered //Flag indicating whether the text is currently visible. - element //The HTML div containing the text. - text //The actual text and is identical with element[0].textContent. - x //X coordinate at which to draw the text. - y //Y coordinate at which to draw the text. - } - ``` - Each position after the first receives a clone of the original element. - The idea is that that the width, height, and general 'identity' of the - text is constant no matter where it is placed; the placements are a - secondary property. - - Canvas maintains a cache of recently-used text info objects; getTextInfo - either returns the cached element or creates a new entry. - - The layer parameter is string of space-separated CSS classes uniquely - identifying the layer containing this text. - Text is the text string to retrieve info for. - Font is either a string of space-separated CSS classes or a font-spec object, - defining the text's font and style. - Angle is the angle at which to rotate the text, in degrees. Angle is currently unused, - it will be implemented in the future. - The last parameter is the Maximum width of the text before it wraps. - The method returns a text info object. 
- */ - Canvas.prototype.getTextInfo = function(layer, text, font, angle, width) { - var textStyle, layerCache, styleCache, info; - - // Cast the value to a string, in case we were given a number or such - - text = '' + text; - - // If the font is a font-spec object, generate a CSS font definition - - if (typeof font === 'object') { - textStyle = font.style + ' ' + font.variant + ' ' + font.weight + ' ' + font.size + 'px/' + font.lineHeight + 'px ' + font.family; - } else { - textStyle = font; - } - - // Retrieve (or create) the cache for the text's layer and styles - - layerCache = this._textCache[layer]; - - if (layerCache == null) { - layerCache = this._textCache[layer] = {}; - } - - styleCache = layerCache[textStyle]; - - if (styleCache == null) { - styleCache = layerCache[textStyle] = {}; - } - - var key = generateKey(text); - info = styleCache[key]; - - // If we can't find a matching element in our cache, create a new one - - if (!info) { - var element = document.createElementNS('http://www.w3.org/2000/svg', 'text'); - if (text.indexOf('
') !== -1) { - addTspanElements(text, element, -9999); - } else { - var textNode = document.createTextNode(text); - element.appendChild(textNode); - } - - element.style.position = 'absolute'; - element.style.maxWidth = width; - element.setAttributeNS(null, 'x', -9999); - element.setAttributeNS(null, 'y', -9999); - - if (typeof font === 'object') { - element.style.font = textStyle; - element.style.fill = font.fill; - } else if (typeof font === 'string') { - element.setAttribute('class', font); - } - - this.getSVGLayer(layer).appendChild(element); - var elementRect = element.getBBox(); - - info = styleCache[key] = { - width: elementRect.width, - height: elementRect.height, - measured: true, - element: element, - positions: [] - }; - - //remove elements from dom - while (element.firstChild) { - element.removeChild(element.firstChild); - } - element.parentNode.removeChild(element); - } - - info.measured = true; - return info; - }; - - /** - - addText (layer, x, y, text, font, angle, width, halign, valign, transforms) - - Adds a text string to the canvas text overlay. - The text isn't drawn immediately; it is marked as rendering, which will - result in its addition to the canvas on the next render pass. - - The layer is string of space-separated CSS classes uniquely - identifying the layer containing this text. - X and Y represents the X and Y coordinate at which to draw the text. - and text is the string to draw - */ - Canvas.prototype.addText = function(layer, x, y, text, font, angle, width, halign, valign, transforms) { - var info = this.getTextInfo(layer, text, font, angle, width), - positions = info.positions; - - // Tweak the div's position to match the text's alignment - - if (halign === 'center') { - x -= info.width / 2; - } else if (halign === 'right') { - x -= info.width; - } - - if (valign === 'middle') { - y -= info.height / 2; - } else if (valign === 'bottom') { - y -= info.height; - } - - y += 0.75 * info.height; - - // Determine whether this text already exists at this position. - // If so, mark it for inclusion in the next render pass. - - for (var i = 0, position; positions[i]; i++) { - position = positions[i]; - if (position.x === x && position.y === y && position.text === text) { - position.active = true; - return; - } else if (position.active === false) { - position.active = true; - position.text = text; - if (text.indexOf('
') !== -1) { - y -= 0.25 * info.height; - addTspanElements(text, position.element, x); - } else { - position.element.textContent = text; - } - position.element.setAttributeNS(null, 'x', x); - position.element.setAttributeNS(null, 'y', y); - position.x = x; - position.y = y; - return; - } - } - - // If the text doesn't exist at this position, create a new entry - - // For the very first position we'll re-use the original element, - // while for subsequent ones we'll clone it. - - position = { - active: true, - rendered: false, - element: positions.length ? info.element.cloneNode() : info.element, - text: text, - x: x, - y: y - }; - - positions.push(position); - - if (text.indexOf('
') !== -1) { - y -= 0.25 * info.height; - addTspanElements(text, position.element, x); - } else { - position.element.textContent = text; - } - - // Move the element to its final position within the container - position.element.setAttributeNS(null, 'x', x); - position.element.setAttributeNS(null, 'y', y); - position.element.style.textAlign = halign; - - if (transforms) { - transforms.forEach(function(t) { - info.element.transform.baseVal.appendItem(t); - }); - } - }; - - var addTspanElements = function(text, element, x) { - var lines = text.split('
'), - tspan, i, offset; - - for (i = 0; i < lines.length; i++) { - if (!element.childNodes[i]) { - tspan = document.createElementNS('http://www.w3.org/2000/svg', 'tspan'); - element.appendChild(tspan); - } else { - tspan = element.childNodes[i]; - } - tspan.textContent = lines[i]; - offset = i * 1 + 'em'; - tspan.setAttributeNS(null, 'dy', offset); - tspan.setAttributeNS(null, 'x', x); - } - } - - /** - - removeText (layer, x, y, text, font, angle) - - The function removes one or more text strings from the canvas text overlay. - If no parameters are given, all text within the layer is removed. - - Note that the text is not immediately removed; it is simply marked as - inactive, which will result in its removal on the next render pass. - This avoids the performance penalty for 'clear and redraw' behavior, - where we potentially get rid of all text on a layer, but will likely - add back most or all of it later, as when redrawing axes, for example. - - The layer is a string of space-separated CSS classes uniquely - identifying the layer containing this text. The following parameter are - X and Y coordinate of the text. - Text is the string to remove, while the font is either a string of space-separated CSS - classes or a font-spec object, defining the text's font and style. - */ - Canvas.prototype.removeText = function(layer, x, y, text, font, angle) { - var info, htmlYCoord; - if (text == null) { - var layerCache = this._textCache[layer]; - if (layerCache != null) { - for (var styleKey in layerCache) { - if (hasOwnProperty.call(layerCache, styleKey)) { - var styleCache = layerCache[styleKey]; - for (var key in styleCache) { - if (hasOwnProperty.call(styleCache, key)) { - var positions = styleCache[key].positions; - positions.forEach(function(position) { - position.active = false; - }); - } - } - } - } - } - } else { - info = this.getTextInfo(layer, text, font, angle); - positions = info.positions; - positions.forEach(function(position) { - htmlYCoord = y + 0.75 * info.height; - if (position.x === x && position.y === htmlYCoord && position.text === text) { - position.active = false; - } - }); - } - }; - - /** - - clearCache() - - Clears the cache used to speed up the text size measurements. - As an (unfortunate) side effect all text within the text Layer is removed. - Use this function before plot.setupGrid() and plot.draw() if the plot just - became visible or the styles changed. - */ - Canvas.prototype.clearCache = function() { - var cache = this._textCache; - for (var layerKey in cache) { - if (hasOwnProperty.call(cache, layerKey)) { - var layer = this.getSVGLayer(layerKey); - while (layer.firstChild) { - layer.removeChild(layer.firstChild); - } - } - }; - - this._textCache = {}; - }; - - function generateKey(text) { - return text.replace(/0|1|2|3|4|5|6|7|8|9/g, '0'); - } - - if (!window.Flot) { - window.Flot = {}; - } - - window.Flot.Canvas = Canvas; -})(jQuery); diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.colorhelpers.js b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.colorhelpers.js deleted file mode 100644 index c59cf2f..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.colorhelpers.js +++ /dev/null @@ -1,199 +0,0 @@ -/* Plugin for jQuery for working with colors. - * - * Version 1.1. - * - * Inspiration from jQuery color animation plugin by John Resig. - * - * Released under the MIT license by Ole Laursen, October 2009. 
- * - * Examples: - * - * $.color.parse("#fff").scale('rgb', 0.25).add('a', -0.5).toString() - * var c = $.color.extract($("#mydiv"), 'background-color'); - * console.log(c.r, c.g, c.b, c.a); - * $.color.make(100, 50, 25, 0.4).toString() // returns "rgba(100,50,25,0.4)" - * - * Note that .scale() and .add() return the same modified object - * instead of making a new one. - * - * V. 1.1: Fix error handling so e.g. parsing an empty string does - * produce a color rather than just crashing. - */ - -(function($) { - $.color = {}; - - // construct color object with some convenient chainable helpers - $.color.make = function (r, g, b, a) { - var o = {}; - o.r = r || 0; - o.g = g || 0; - o.b = b || 0; - o.a = a != null ? a : 1; - - o.add = function (c, d) { - for (var i = 0; i < c.length; ++i) { - o[c.charAt(i)] += d; - } - - return o.normalize(); - }; - - o.scale = function (c, f) { - for (var i = 0; i < c.length; ++i) { - o[c.charAt(i)] *= f; - } - - return o.normalize(); - }; - - o.toString = function () { - if (o.a >= 1.0) { - return "rgb(" + [o.r, o.g, o.b].join(",") + ")"; - } else { - return "rgba(" + [o.r, o.g, o.b, o.a].join(",") + ")"; - } - }; - - o.normalize = function () { - function clamp(min, value, max) { - return value < min ? min : (value > max ? max : value); - } - - o.r = clamp(0, parseInt(o.r), 255); - o.g = clamp(0, parseInt(o.g), 255); - o.b = clamp(0, parseInt(o.b), 255); - o.a = clamp(0, o.a, 1); - return o; - }; - - o.clone = function () { - return $.color.make(o.r, o.b, o.g, o.a); - }; - - return o.normalize(); - } - - // extract CSS color property from element, going up in the DOM - // if it's "transparent" - $.color.extract = function (elem, css) { - var c; - - do { - c = elem.css(css).toLowerCase(); - // keep going until we find an element that has color, or - // we hit the body or root (have no parent) - if (c !== '' && c !== 'transparent') { - break; - } - - elem = elem.parent(); - } while (elem.length && !$.nodeName(elem.get(0), "body")); - - // catch Safari's way of signalling transparent - if (c === "rgba(0, 0, 0, 0)") { - c = "transparent"; - } - - return $.color.parse(c); - } - - // parse CSS color string (like "rgb(10, 32, 43)" or "#fff"), - // returns color object, if parsing failed, you get black (0, 0, - // 0) out - $.color.parse = function (str) { - var res, m = $.color.make; - - // Look for rgb(num,num,num) - res = /rgb\(\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*\)/.exec(str); - if (res) { - return m(parseInt(res[1], 10), parseInt(res[2], 10), parseInt(res[3], 10)); - } - - // Look for rgba(num,num,num,num) - res = /rgba\(\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]+(?:\.[0-9]+)?)\s*\)/.exec(str) - if (res) { - return m(parseInt(res[1], 10), parseInt(res[2], 10), parseInt(res[3], 10), parseFloat(res[4])); - } - - // Look for rgb(num%,num%,num%) - res = /rgb\(\s*([0-9]+(?:\.[0-9]+)?)%\s*,\s*([0-9]+(?:\.[0-9]+)?)%\s*,\s*([0-9]+(?:\.[0-9]+)?)%\s*\)/.exec(str); - if (res) { - return m(parseFloat(res[1]) * 2.55, parseFloat(res[2]) * 2.55, parseFloat(res[3]) * 2.55); - } - - // Look for rgba(num%,num%,num%,num) - res = /rgba\(\s*([0-9]+(?:\.[0-9]+)?)%\s*,\s*([0-9]+(?:\.[0-9]+)?)%\s*,\s*([0-9]+(?:\.[0-9]+)?)%\s*,\s*([0-9]+(?:\.[0-9]+)?)\s*\)/.exec(str); - if (res) { - return m(parseFloat(res[1]) * 2.55, parseFloat(res[2]) * 2.55, parseFloat(res[3]) * 2.55, parseFloat(res[4])); - } - - // Look for #a0b1c2 - res = /#([a-fA-F0-9]{2})([a-fA-F0-9]{2})([a-fA-F0-9]{2})/.exec(str); - if (res) { - return m(parseInt(res[1], 16), 
parseInt(res[2], 16), parseInt(res[3], 16)); - } - - // Look for #fff - res = /#([a-fA-F0-9])([a-fA-F0-9])([a-fA-F0-9])/.exec(str); - if (res) { - return m(parseInt(res[1] + res[1], 16), parseInt(res[2] + res[2], 16), parseInt(res[3] + res[3], 16)); - } - - // Otherwise, we're most likely dealing with a named color - var name = $.trim(str).toLowerCase(); - if (name === "transparent") { - return m(255, 255, 255, 0); - } else { - // default to black - res = lookupColors[name] || [0, 0, 0]; - return m(res[0], res[1], res[2]); - } - } - - var lookupColors = { - aqua: [0, 255, 255], - azure: [240, 255, 255], - beige: [245, 245, 220], - black: [0, 0, 0], - blue: [0, 0, 255], - brown: [165, 42, 42], - cyan: [0, 255, 255], - darkblue: [0, 0, 139], - darkcyan: [0, 139, 139], - darkgrey: [169, 169, 169], - darkgreen: [0, 100, 0], - darkkhaki: [189, 183, 107], - darkmagenta: [139, 0, 139], - darkolivegreen: [85, 107, 47], - darkorange: [255, 140, 0], - darkorchid: [153, 50, 204], - darkred: [139, 0, 0], - darksalmon: [233, 150, 122], - darkviolet: [148, 0, 211], - fuchsia: [255, 0, 255], - gold: [255, 215, 0], - green: [0, 128, 0], - indigo: [75, 0, 130], - khaki: [240, 230, 140], - lightblue: [173, 216, 230], - lightcyan: [224, 255, 255], - lightgreen: [144, 238, 144], - lightgrey: [211, 211, 211], - lightpink: [255, 182, 193], - lightyellow: [255, 255, 224], - lime: [0, 255, 0], - magenta: [255, 0, 255], - maroon: [128, 0, 0], - navy: [0, 0, 128], - olive: [128, 128, 0], - orange: [255, 165, 0], - pink: [255, 192, 203], - purple: [128, 0, 128], - violet: [128, 0, 128], - red: [255, 0, 0], - silver: [192, 192, 192], - white: [255, 255, 255], - yellow: [255, 255, 0] - }; -})(jQuery); diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.axislabels.js b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.axislabels.js deleted file mode 100644 index 8c80828..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.axislabels.js +++ /dev/null @@ -1,212 +0,0 @@ -/* -Axis label plugin for flot - -Derived from: -Axis Labels Plugin for flot. -http://github.com/markrcote/flot-axislabels - -Original code is Copyright (c) 2010 Xuan Luo. -Original code was released under the GPLv3 license by Xuan Luo, September 2010. -Original code was rereleased under the MIT license by Xuan Luo, April 2012. - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-*/ - -(function($) { - "use strict"; - - var options = { - axisLabels: { - show: true - } - }; - - function AxisLabel(axisName, position, padding, placeholder, axisLabel, surface) { - this.axisName = axisName; - this.position = position; - this.padding = padding; - this.placeholder = placeholder; - this.axisLabel = axisLabel; - this.surface = surface; - this.width = 0; - this.height = 0; - this.elem = null; - } - - AxisLabel.prototype.calculateSize = function() { - var axisId = this.axisName + 'Label', - layerId = axisId + 'Layer', - className = axisId + ' axisLabels'; - - var info = this.surface.getTextInfo(layerId, this.axisLabel, className); - this.labelWidth = info.width; - this.labelHeight = info.height; - - if (this.position === 'left' || this.position === 'right') { - this.width = this.labelHeight + this.padding; - this.height = 0; - } else { - this.width = 0; - this.height = this.labelHeight + this.padding; - } - }; - - AxisLabel.prototype.transforms = function(degrees, x, y, svgLayer) { - var transforms = [], translate, rotate; - if (x !== 0 || y !== 0) { - translate = svgLayer.createSVGTransform(); - translate.setTranslate(x, y); - transforms.push(translate); - } - if (degrees !== 0) { - rotate = svgLayer.createSVGTransform(); - var centerX = Math.round(this.labelWidth / 2), - centerY = 0; - rotate.setRotate(degrees, centerX, centerY); - transforms.push(rotate); - } - - return transforms; - }; - - AxisLabel.prototype.calculateOffsets = function(box) { - var offsets = { - x: 0, - y: 0, - degrees: 0 - }; - if (this.position === 'bottom') { - offsets.x = box.left + box.width / 2 - this.labelWidth / 2; - offsets.y = box.top + box.height - this.labelHeight; - } else if (this.position === 'top') { - offsets.x = box.left + box.width / 2 - this.labelWidth / 2; - offsets.y = box.top; - } else if (this.position === 'left') { - offsets.degrees = -90; - offsets.x = box.left - this.labelWidth / 2; - offsets.y = box.height / 2 + box.top; - } else if (this.position === 'right') { - offsets.degrees = 90; - offsets.x = box.left + box.width - this.labelWidth / 2; - offsets.y = box.height / 2 + box.top; - } - offsets.x = Math.round(offsets.x); - offsets.y = Math.round(offsets.y); - - return offsets; - }; - - AxisLabel.prototype.cleanup = function() { - var axisId = this.axisName + 'Label', - layerId = axisId + 'Layer', - className = axisId + ' axisLabels'; - this.surface.removeText(layerId, 0, 0, this.axisLabel, className); - }; - - AxisLabel.prototype.draw = function(box) { - var axisId = this.axisName + 'Label', - layerId = axisId + 'Layer', - className = axisId + ' axisLabels', - offsets = this.calculateOffsets(box), - style = { - position: 'absolute', - bottom: '', - right: '', - display: 'inline-block', - 'white-space': 'nowrap' - }; - - var layer = this.surface.getSVGLayer(layerId); - var transforms = this.transforms(offsets.degrees, offsets.x, offsets.y, layer.parentNode); - - this.surface.addText(layerId, 0, 0, this.axisLabel, className, undefined, undefined, undefined, undefined, transforms); - this.surface.render(); - Object.keys(style).forEach(function(key) { - layer.style[key] = style[key]; - }); - }; - - function init(plot) { - plot.hooks.processOptions.push(function(plot, options) { - if (!options.axisLabels.show) { - return; - } - - var axisLabels = {}; - var defaultPadding = 2; // padding between axis and tick labels - - plot.hooks.axisReserveSpace.push(function(plot, axis) { - var opts = axis.options; - var axisName = axis.direction + axis.n; - - axis.labelHeight += 
axis.boxPosition.centerY; - axis.labelWidth += axis.boxPosition.centerX; - - if (!opts || !opts.axisLabel || !axis.show) { - return; - } - - var padding = opts.axisLabelPadding === undefined - ? defaultPadding - : opts.axisLabelPadding; - - var axisLabel = axisLabels[axisName]; - if (!axisLabel) { - axisLabel = new AxisLabel(axisName, - opts.position, padding, - plot.getPlaceholder()[0], opts.axisLabel, plot.getSurface()); - axisLabels[axisName] = axisLabel; - } - - axisLabel.calculateSize(); - - // Incrementing the sizes of the tick labels. - axis.labelHeight += axisLabel.height; - axis.labelWidth += axisLabel.width; - }); - - // TODO - use the drawAxis hook - plot.hooks.draw.push(function(plot, ctx) { - $.each(plot.getAxes(), function(flotAxisName, axis) { - var opts = axis.options; - if (!opts || !opts.axisLabel || !axis.show) { - return; - } - - var axisName = axis.direction + axis.n; - axisLabels[axisName].draw(axis.box); - }); - }); - - plot.hooks.shutdown.push(function(plot, eventHolder) { - for (var axisName in axisLabels) { - axisLabels[axisName].cleanup(); - } - }); - }); - }; - - $.plot.plugins.push({ - init: init, - options: options, - name: 'axisLabels', - version: '3.0' - }); -})(jQuery); diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.browser.js b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.browser.js deleted file mode 100644 index e50a629..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.browser.js +++ /dev/null @@ -1,98 +0,0 @@ -/** ## jquery.flot.browser.js - -This plugin is used to make available some browser-related utility functions. - -### Methods -*/ - -(function ($) { - 'use strict'; - - var browser = { - /** - - getPageXY(e) - - Calculates the pageX and pageY using the screenX, screenY properties of the event - and the scrolling of the page. This is needed because the pageX and pageY - properties of the event are not correct while running tests in Edge. */ - getPageXY: function (e) { - // This code is inspired from https://stackoverflow.com/a/3464890 - var doc = document.documentElement, - pageX = e.clientX + (window.pageXOffset || doc.scrollLeft) - (doc.clientLeft || 0), - pageY = e.clientY + (window.pageYOffset || doc.scrollTop) - (doc.clientTop || 0); - return { X: pageX, Y: pageY }; - }, - - /** - - getPixelRatio(context) - - This function returns the current pixel ratio defined by the product of desktop - zoom and page zoom. - Additional info: https://www.html5rocks.com/en/tutorials/canvas/hidpi/ - */ - getPixelRatio: function(context) { - var devicePixelRatio = window.devicePixelRatio || 1, - backingStoreRatio = - context.webkitBackingStorePixelRatio || - context.mozBackingStorePixelRatio || - context.msBackingStorePixelRatio || - context.oBackingStorePixelRatio || - context.backingStorePixelRatio || 1; - return devicePixelRatio / backingStoreRatio; - }, - - /** - - isSafari, isMobileSafari, isOpera, isFirefox, isIE, isEdge, isChrome, isBlink - - This is a collection of functions, used to check if the code is running in a - particular browser or Javascript engine. 
- */ - isSafari: function() { - // *** https://stackoverflow.com/questions/9847580/how-to-detect-safari-chrome-ie-firefox-and-opera-browser - // Safari 3.0+ "[object HTMLElementConstructor]" - return /constructor/i.test(window.top.HTMLElement) || (function (p) { return p.toString() === "[object SafariRemoteNotification]"; })(!window.top['safari'] || (typeof window.top.safari !== 'undefined' && window.top.safari.pushNotification)); - }, - - isMobileSafari: function() { - //isMobileSafari adapted from https://stackoverflow.com/questions/3007480/determine-if-user-navigated-from-mobile-safari - return navigator.userAgent.match(/(iPod|iPhone|iPad)/) && navigator.userAgent.match(/AppleWebKit/); - }, - - isOpera: function() { - // *** https://stackoverflow.com/questions/9847580/how-to-detect-safari-chrome-ie-firefox-and-opera-browser - //Opera 8.0+ - return (!!window.opr && !!opr.addons) || !!window.opera || navigator.userAgent.indexOf(' OPR/') >= 0; - }, - - isFirefox: function() { - // *** https://stackoverflow.com/questions/9847580/how-to-detect-safari-chrome-ie-firefox-and-opera-browser - // Firefox 1.0+ - return typeof InstallTrigger !== 'undefined'; - }, - - isIE: function() { - // *** https://stackoverflow.com/questions/9847580/how-to-detect-safari-chrome-ie-firefox-and-opera-browser - // Internet Explorer 6-11 - return /*@cc_on!@*/false || !!document.documentMode; - }, - - isEdge: function() { - // *** https://stackoverflow.com/questions/9847580/how-to-detect-safari-chrome-ie-firefox-and-opera-browser - // Edge 20+ - return !browser.isIE() && !!window.StyleMedia; - }, - - isChrome: function() { - // *** https://stackoverflow.com/questions/9847580/how-to-detect-safari-chrome-ie-firefox-and-opera-browser - // Chrome 1+ - return !!window.chrome && !!window.chrome.webstore; - }, - - isBlink: function() { - // *** https://stackoverflow.com/questions/9847580/how-to-detect-safari-chrome-ie-firefox-and-opera-browser - return (browser.isChrome() || browser.isOpera()) && !!window.CSS; - } - }; - - $.plot.browser = browser; -})(jQuery); diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.categories.js b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.categories.js deleted file mode 100644 index af16f78..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.categories.js +++ /dev/null @@ -1,202 +0,0 @@ -/* Flot plugin for plotting textual data or categories. - -Copyright (c) 2007-2014 IOLA and Ole Laursen. -Licensed under the MIT license. - -Consider a dataset like [["February", 34], ["March", 20], ...]. This plugin -allows you to plot such a dataset directly. - -To enable it, you must specify mode: "categories" on the axis with the textual -labels, e.g. - - $.plot("#placeholder", data, { xaxis: { mode: "categories" } }); - -By default, the labels are ordered as they are met in the data series. If you -need a different ordering, you can specify "categories" on the axis options -and list the categories there: - - xaxis: { - mode: "categories", - categories: ["February", "March", "April"] - } - -If you need to customize the distances between the categories, you can specify -"categories" as an object mapping labels to values - - xaxis: { - mode: "categories", - categories: { "February": 1, "March": 3, "April": 4 } - } - -If you don't specify all categories, the remaining categories will be numbered -from the max value plus 1 (with a spacing of 1 between each). 
- -Internally, the plugin works by transforming the input data through an auto- -generated mapping where the first category becomes 0, the second 1, etc. -Hence, a point like ["February", 34] becomes [0, 34] internally in Flot (this -is visible in hover and click events that return numbers rather than the -category labels). The plugin also overrides the tick generator to spit out the -categories as ticks instead of the values. - -If you need to map a value back to its label, the mapping is always accessible -as "categories" on the axis object, e.g. plot.getAxes().xaxis.categories. - -*/ - -(function ($) { - var options = { - xaxis: { - categories: null - }, - yaxis: { - categories: null - } - }; - - function processRawData(plot, series, data, datapoints) { - // if categories are enabled, we need to disable - // auto-transformation to numbers so the strings are intact - // for later processing - - var xCategories = series.xaxis.options.mode === "categories", - yCategories = series.yaxis.options.mode === "categories"; - - if (!(xCategories || yCategories)) { - return; - } - - var format = datapoints.format; - - if (!format) { - // FIXME: auto-detection should really not be defined here - var s = series; - format = []; - format.push({ x: true, number: true, required: true, computeRange: true}); - format.push({ y: true, number: true, required: true, computeRange: true }); - - if (s.bars.show || (s.lines.show && s.lines.fill)) { - var autoScale = !!((s.bars.show && s.bars.zero) || (s.lines.show && s.lines.zero)); - format.push({ y: true, number: true, required: false, defaultValue: 0, computeRange: autoScale }); - if (s.bars.horizontal) { - delete format[format.length - 1].y; - format[format.length - 1].x = true; - } - } - - datapoints.format = format; - } - - for (var m = 0; m < format.length; ++m) { - if (format[m].x && xCategories) { - format[m].number = false; - } - - if (format[m].y && yCategories) { - format[m].number = false; - format[m].computeRange = false; - } - } - } - - function getNextIndex(categories) { - var index = -1; - - for (var v in categories) { - if (categories[v] > index) { - index = categories[v]; - } - } - - return index + 1; - } - - function categoriesTickGenerator(axis) { - var res = []; - for (var label in axis.categories) { - var v = axis.categories[label]; - if (v >= axis.min && v <= axis.max) { - res.push([v, label]); - } - } - - res.sort(function (a, b) { return a[0] - b[0]; }); - - return res; - } - - function setupCategoriesForAxis(series, axis, datapoints) { - if (series[axis].options.mode !== "categories") { - return; - } - - if (!series[axis].categories) { - // parse options - var c = {}, o = series[axis].options.categories || {}; - if ($.isArray(o)) { - for (var i = 0; i < o.length; ++i) { - c[o[i]] = i; - } - } else { - for (var v in o) { - c[v] = o[v]; - } - } - - series[axis].categories = c; - } - - // fix ticks - if (!series[axis].options.ticks) { - series[axis].options.ticks = categoriesTickGenerator; - } - - transformPointsOnAxis(datapoints, axis, series[axis].categories); - } - - function transformPointsOnAxis(datapoints, axis, categories) { - // go through the points, transforming them - var points = datapoints.points, - ps = datapoints.pointsize, - format = datapoints.format, - formatColumn = axis.charAt(0), - index = getNextIndex(categories); - - for (var i = 0; i < points.length; i += ps) { - if (points[i] == null) { - continue; - } - - for (var m = 0; m < ps; ++m) { - var val = points[i + m]; - - if (val == null || !format[m][formatColumn]) { - 
continue; - } - - if (!(val in categories)) { - categories[val] = index; - ++index; - } - - points[i + m] = categories[val]; - } - } - } - - function processDatapoints(plot, series, datapoints) { - setupCategoriesForAxis(series, "xaxis", datapoints); - setupCategoriesForAxis(series, "yaxis", datapoints); - } - - function init(plot) { - plot.hooks.processRawData.push(processRawData); - plot.hooks.processDatapoints.push(processDatapoints); - } - - $.plot.plugins.push({ - init: init, - options: options, - name: 'categories', - version: '1.0' - }); -})(jQuery); diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.composeImages.js b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.composeImages.js deleted file mode 100644 index a3d6fe6..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.composeImages.js +++ /dev/null @@ -1,325 +0,0 @@ -/** ## jquery.flot.composeImages.js - -This plugin exposes a function used to overlap several canvases and -SVGs, for the purpose of creating a snapshot out of them. - -### When composeImages is used: -When multiple canvases and SVGs have to be overlapped into a single image, -and their offsets on the page must be preserved. - -### Where it can be used: -In creating a downloadable snapshot of the plots, axes, cursors etc. of a graph. - -### How it works: -The entry point is the composeImages function. It expects an array of objects, -which should be either canvases or SVGs (or a mix). It prevalidates them, -verifying whether they will be usable later in the flow. -After selecting only the usable sources, it passes them to the getGenerateTempImg -function, which generates temporary images out of them. This function -expects that some of the passed sources (canvas or SVG) may still have -problems being converted to an image, and makes sure the promise chain -used by composeImages keeps moving forward. As an example, SVGs with -missing header information or with unsupported content may lead to -failure in generating the temporary image. Temporary images are needed -mostly for extracting content from SVGs, but this is also where the x/y -offsets are extracted for each image that will be added. For SVGs in -particular, their CSS rules have to be applied. -After all temporary images are generated, they are overlapped using the -getExecuteImgComposition function. This is where the destination canvas -is set to the proper dimensions. The result is then output by composeImages, -which returns a promise that can be used to wait for the whole -composition process. It has to be asynchronous, because this is how -temporary images load their data.
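A minimal sketch of driving this flow from the outside (assuming an existing Flot instance `plot` with this plugin loaded; the way the sources are collected and the `snapshot` canvas are illustrative assumptions):

```
// Gather the plot's canvases/SVGs and compose them into one snapshot canvas.
var destination = document.getElementById('snapshot');              // hypothetical <canvas>
var sources = plot.getPlaceholder().find('canvas, svg').toArray();  // one possible way to gather sources

plot.composeImages(sources, destination).then(function () {
    // The composition promise has resolved; export the snapshot.
    var dataUrl = destination.toDataURL('image/png');
    console.log('snapshot data URL length:', dataUrl.length);
});
```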
-*/ - -(function($) { - "use strict"; - const GENERALFAILURECALLBACKERROR = -100; //simply a negative number - const SUCCESSFULIMAGEPREPARATION = 0; - const EMPTYARRAYOFIMAGESOURCES = -1; - const NEGATIVEIMAGESIZE = -2; - var pixelRatio = 1; - var browser = $.plot.browser; - var getPixelRatio = browser.getPixelRatio; - - function composeImages(canvasOrSvgSources, destinationCanvas) { - var validCanvasOrSvgSources = canvasOrSvgSources.filter(isValidSource); - pixelRatio = getPixelRatio(destinationCanvas.getContext('2d')); - - var allImgCompositionPromises = validCanvasOrSvgSources.map(function(validCanvasOrSvgSource) { - var tempImg = new Image(); - var currentPromise = new Promise(getGenerateTempImg(tempImg, validCanvasOrSvgSource)); - return currentPromise; - }); - - var lastPromise = Promise.all(allImgCompositionPromises).then(getExecuteImgComposition(destinationCanvas), failureCallback); - return lastPromise; - } - - function isValidSource(canvasOrSvgSource) { - var isValidFromCanvas = true; - var isValidFromContent = true; - if ((canvasOrSvgSource === null) || (canvasOrSvgSource === undefined)) { - isValidFromContent = false; - } else { - if (canvasOrSvgSource.tagName === 'CANVAS') { - if ((canvasOrSvgSource.getBoundingClientRect().right === canvasOrSvgSource.getBoundingClientRect().left) || - (canvasOrSvgSource.getBoundingClientRect().bottom === canvasOrSvgSource.getBoundingClientRect().top)) { - isValidFromCanvas = false; - } - } - } - return isValidFromContent && isValidFromCanvas && (window.getComputedStyle(canvasOrSvgSource).visibility === 'visible'); - } - - function getGenerateTempImg(tempImg, canvasOrSvgSource) { - tempImg.sourceDescription = ''; - tempImg.sourceComponent = canvasOrSvgSource; - - return function doGenerateTempImg(successCallbackFunc, failureCallbackFunc) { - tempImg.onload = function(evt) { - tempImg.successfullyLoaded = true; - successCallbackFunc(tempImg); - }; - - tempImg.onabort = function(evt) { - tempImg.successfullyLoaded = false; - console.log('Can\'t generate temp image from ' + tempImg.sourceDescription + '. It is possible that it is missing some properties or its content is not supported by this browser. Source component:', tempImg.sourceComponent); - successCallbackFunc(tempImg); //call successCallback, to allow snapshot of all working images - }; - - tempImg.onerror = function(evt) { - tempImg.successfullyLoaded = false; - console.log('Can\'t generate temp image from ' + tempImg.sourceDescription + '. It is possible that it is missing some properties or its content is not supported by this browser. 
Source component:', tempImg.sourceComponent); - successCallbackFunc(tempImg); //call successCallback, to allow snapshot of all working images - }; - - generateTempImageFromCanvasOrSvg(canvasOrSvgSource, tempImg); - }; - } - - function getExecuteImgComposition(destinationCanvas) { - return function executeImgComposition(tempImgs) { - var compositionResult = copyImgsToCanvas(tempImgs, destinationCanvas); - return compositionResult; - }; - } - - function copyCanvasToImg(canvas, img) { - img.src = canvas.toDataURL('image/png'); - } - - function getCSSRules(document) { - var styleSheets = document.styleSheets, - rulesList = []; - for (var i = 0; i < styleSheets.length; i++) { - // in Chrome, the external CSS files are empty when the page is directly loaded from disk - var rules = styleSheets[i].cssRules || []; - for (var j = 0; j < rules.length; j++) { - var rule = rules[j]; - rulesList.push(rule.cssText); - } - } - return rulesList; - } - - function embedCSSRulesInSVG(rules, svg) { - var text = [ - '', - '', - svg.innerHTML, - '' - ].join('\n'); - return text; - } - - function copySVGToImgMostBrowsers(svg, img) { - var rules = getCSSRules(document), - source = embedCSSRulesInSVG(rules, svg); - - source = patchSVGSource(source); - - var blob = new Blob([source], {type: "image/svg+xml;charset=utf-8"}), - domURL = self.URL || self.webkitURL || self, - url = domURL.createObjectURL(blob); - img.src = url; - } - - function copySVGToImgSafari(svg, img) { - // Use this method to convert a string buffer array to a binary string. - // Do so by breaking up large strings into smaller substrings; this is necessary to avoid the - // "maximum call stack size exceeded" exception that can happen when calling 'String.fromCharCode.apply' - // with a very long array. - function buildBinaryString (arrayBuffer) { - var binaryString = ""; - const utf8Array = new Uint8Array(arrayBuffer); - const blockSize = 16384; - for (var i = 0; i < utf8Array.length; i = i + blockSize) { - const binarySubString = String.fromCharCode.apply(null, utf8Array.subarray(i, i + blockSize)); - binaryString = binaryString + binarySubString; - } - return binaryString; - }; - - var rules = getCSSRules(document), - source = embedCSSRulesInSVG(rules, svg), - data, - utf8BinaryString; - - source = patchSVGSource(source); - - // Encode the string as UTF-8 and convert it to a binary string. The UTF-8 encoding is required to - // capture unicode characters correctly. - utf8BinaryString = buildBinaryString(new (TextEncoder || TextEncoderLite)('utf-8').encode(source)); - - data = "data:image/svg+xml;base64," + btoa(utf8BinaryString); - img.src = data; - } - - function patchSVGSource(svgSource) { - var source = ''; - //add name spaces. 
- if (!svgSource.match(/^<svg[^>]+xmlns="http:\/\/www\.w3\.org\/2000\/svg"/)) { - source = svgSource.replace(/^<svg/, '<svg xmlns="http://www.w3.org/2000/svg"'); - } - - if (!svgSource.match(/^<svg[^>]+"http:\/\/www\.w3\.org\/1999\/xlink"/)) { - source = svgSource.replace(/^<svg/, '<svg xmlns:xlink="http://www.w3.org/1999/xlink"'); - } - - //add xml declaration - return '<?xml version="1.0" standalone="no"?>\r\n' + source; - } - - function copySVGToImg(svg, img) { - if (browser.isSafari() || browser.isMobileSafari()) { - copySVGToImgSafari(svg, img); - } else { - copySVGToImgMostBrowsers(svg, img); - } - } - - function adaptDestSizeToZoom(destinationCanvas, sources) { - function containsSVGs(source) { - return source.srcImgTagName === 'svg'; - } - - if (sources.find(containsSVGs) !== undefined) { - if (pixelRatio < 1) { - destinationCanvas.width = destinationCanvas.width * pixelRatio; - destinationCanvas.height = destinationCanvas.height * pixelRatio; - } - } - } - - function prepareImagesToBeComposed(sources, destination) { - var result = SUCCESSFULIMAGEPREPARATION; - if (sources.length === 0) { - result = EMPTYARRAYOFIMAGESOURCES; //nothing to do if called without sources - } else { - var minX = sources[0].genLeft; - var minY = sources[0].genTop; - var maxX = sources[0].genRight; - var maxY = sources[0].genBottom; - var i = 0; - - for (i = 1; i < sources.length; i++) { - if (minX > sources[i].genLeft) { - minX = sources[i].genLeft; - } - - if (minY > sources[i].genTop) { - minY = sources[i].genTop; - } - } - - for (i = 1; i < sources.length; i++) { - if (maxX < sources[i].genRight) { - maxX = sources[i].genRight; - } - - if (maxY < sources[i].genBottom) { - maxY = sources[i].genBottom; - } - } - - if ((maxX - minX <= 0) || (maxY - minY <= 0)) { - result = NEGATIVEIMAGESIZE; //this might occur on hidden images - } else { - destination.width = Math.round(maxX - minX); - destination.height = Math.round(maxY - minY); - - for (i = 0; i < sources.length; i++) { - sources[i].xCompOffset = sources[i].genLeft - minX; - sources[i].yCompOffset = sources[i].genTop - minY; - } - - adaptDestSizeToZoom(destination, sources); - } - } - return result; - } - - function copyImgsToCanvas(sources, destination) { - var prepareImagesResult = prepareImagesToBeComposed(sources, destination); - if (prepareImagesResult === SUCCESSFULIMAGEPREPARATION) { - var destinationCtx = destination.getContext('2d'); - - for (var i = 0; i < sources.length; i++) { - if (sources[i].successfullyLoaded === true) { - destinationCtx.drawImage(sources[i], sources[i].xCompOffset * pixelRatio, sources[i].yCompOffset * pixelRatio); - } - } - } - return prepareImagesResult; - } - - function adnotateDestImgWithBoundingClientRect(srcCanvasOrSvg, destImg) { - destImg.genLeft = srcCanvasOrSvg.getBoundingClientRect().left; - destImg.genTop = srcCanvasOrSvg.getBoundingClientRect().top; - - if (srcCanvasOrSvg.tagName === 'CANVAS') { - destImg.genRight = destImg.genLeft + srcCanvasOrSvg.width; - destImg.genBottom = destImg.genTop + srcCanvasOrSvg.height; - } - - if (srcCanvasOrSvg.tagName === 'svg') { - destImg.genRight = srcCanvasOrSvg.getBoundingClientRect().right; - destImg.genBottom = srcCanvasOrSvg.getBoundingClientRect().bottom; - } - } - - function generateTempImageFromCanvasOrSvg(srcCanvasOrSvg, destImg) { - if (srcCanvasOrSvg.tagName === 'CANVAS') { - copyCanvasToImg(srcCanvasOrSvg, destImg); - } - - if (srcCanvasOrSvg.tagName === 'svg') { - copySVGToImg(srcCanvasOrSvg, destImg); - } - - destImg.srcImgTagName = srcCanvasOrSvg.tagName; - adnotateDestImgWithBoundingClientRect(srcCanvasOrSvg, destImg); - } - - function failureCallback() { - return GENERALFAILURECALLBACKERROR; - } - - // used for testing - $.plot.composeImages = composeImages; - - function init(plot)
{ - // used to extend the public API of the plot - plot.composeImages = composeImages; - } - - $.plot.plugins.push({ - init: init, - name: 'composeImages', - version: '1.0' - }); -})(jQuery); diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.crosshair.js b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.crosshair.js deleted file mode 100644 index 385c705..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.crosshair.js +++ /dev/null @@ -1,202 +0,0 @@ -/* Flot plugin for showing crosshairs when the mouse hovers over the plot. - -Copyright (c) 2007-2014 IOLA and Ole Laursen. -Licensed under the MIT license. - -The plugin supports these options: - - crosshair: { - mode: null or "x" or "y" or "xy" - color: color - lineWidth: number - } - -Set the mode to one of "x", "y" or "xy". The "x" mode enables a vertical -crosshair that lets you trace the values on the x axis, "y" enables a -horizontal crosshair and "xy" enables them both. "color" is the color of the -crosshair (default is "rgba(170, 0, 0, 0.80)"), "lineWidth" is the width of -the drawn lines (default is 1). - -The plugin also adds four public methods: - - - setCrosshair( pos ) - - Set the position of the crosshair. Note that this is cleared if the user - moves the mouse. "pos" is in coordinates of the plot and should be on the - form { x: xpos, y: ypos } (you can use x2/x3/... if you're using multiple - axes), which is coincidentally the same format as what you get from a - "plothover" event. If "pos" is null, the crosshair is cleared. - - - clearCrosshair() - - Clear the crosshair. - - - lockCrosshair(pos) - - Cause the crosshair to lock to the current location, no longer updating if - the user moves the mouse. Optionally supply a position (passed on to - setCrosshair()) to move it to. - - Example usage: - - var myFlot = $.plot( $("#graph"), ..., { crosshair: { mode: "x" } } }; - $("#graph").bind( "plothover", function ( evt, position, item ) { - if ( item ) { - // Lock the crosshair to the data point being hovered - myFlot.lockCrosshair({ - x: item.datapoint[ 0 ], - y: item.datapoint[ 1 ] - }); - } else { - // Return normal crosshair operation - myFlot.unlockCrosshair(); - } - }); - - - unlockCrosshair() - - Free the crosshair to move again after locking it. 
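In addition to the lockCrosshair() example above, the crosshair can be driven entirely from code; a minimal sketch (assuming jQuery, Flot, and this plugin are loaded; "#graph" and `data` are hypothetical):

```
var myFlot = $.plot($("#graph"), [data], {
    crosshair: { mode: "x", color: "rgba(170, 0, 0, 0.80)", lineWidth: 1 }
});

myFlot.setCrosshair({ x: 5, y: 10 });  // plot coordinates, same format as "plothover"
// later, when the readout is dismissed:
myFlot.clearCrosshair();               // equivalent to setCrosshair(null)
```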
-*/ - -(function ($) { - var options = { - crosshair: { - mode: null, // one of null, "x", "y" or "xy", - color: "rgba(170, 0, 0, 0.80)", - lineWidth: 1 - } - }; - - function init(plot) { - // position of crosshair in pixels - var crosshair = {x: -1, y: -1, locked: false, highlighted: false}; - - plot.setCrosshair = function setCrosshair(pos) { - if (!pos) { - crosshair.x = -1; - } else { - var o = plot.p2c(pos); - crosshair.x = Math.max(0, Math.min(o.left, plot.width())); - crosshair.y = Math.max(0, Math.min(o.top, plot.height())); - } - - plot.triggerRedrawOverlay(); - }; - - plot.clearCrosshair = plot.setCrosshair; // passes null for pos - - plot.lockCrosshair = function lockCrosshair(pos) { - if (pos) { - plot.setCrosshair(pos); - } - - crosshair.locked = true; - }; - - plot.unlockCrosshair = function unlockCrosshair() { - crosshair.locked = false; - crosshair.rect = null; - }; - - function onMouseOut(e) { - if (crosshair.locked) { - return; - } - - if (crosshair.x !== -1) { - crosshair.x = -1; - plot.triggerRedrawOverlay(); - } - } - - function onMouseMove(e) { - var offset = plot.offset(); - if (crosshair.locked) { - var mouseX = Math.max(0, Math.min(e.pageX - offset.left, plot.width())); - var mouseY = Math.max(0, Math.min(e.pageY - offset.top, plot.height())); - - if ((mouseX > crosshair.x - 4) && (mouseX < crosshair.x + 4) && (mouseY > crosshair.y - 4) && (mouseY < crosshair.y + 4)) { - if (!crosshair.highlighted) { - crosshair.highlighted = true; - plot.triggerRedrawOverlay(); - } - } else { - if (crosshair.highlighted) { - crosshair.highlighted = false; - plot.triggerRedrawOverlay(); - } - } - return; - } - - if (plot.getSelection && plot.getSelection()) { - crosshair.x = -1; // hide the crosshair while selecting - return; - } - - crosshair.x = Math.max(0, Math.min(e.pageX - offset.left, plot.width())); - crosshair.y = Math.max(0, Math.min(e.pageY - offset.top, plot.height())); - plot.triggerRedrawOverlay(); - } - - plot.hooks.bindEvents.push(function (plot, eventHolder) { - if (!plot.getOptions().crosshair.mode) { - return; - } - - eventHolder.mouseout(onMouseOut); - eventHolder.mousemove(onMouseMove); - }); - - plot.hooks.drawOverlay.push(function (plot, ctx) { - var c = plot.getOptions().crosshair; - if (!c.mode) { - return; - } - - var plotOffset = plot.getPlotOffset(); - - ctx.save(); - ctx.translate(plotOffset.left, plotOffset.top); - - if (crosshair.x !== -1) { - var adj = plot.getOptions().crosshair.lineWidth % 2 ? 
0.5 : 0; - - ctx.strokeStyle = c.color; - ctx.lineWidth = c.lineWidth; - ctx.lineJoin = "round"; - - ctx.beginPath(); - if (c.mode.indexOf("x") !== -1) { - var drawX = Math.floor(crosshair.x) + adj; - ctx.moveTo(drawX, 0); - ctx.lineTo(drawX, plot.height()); - } - if (c.mode.indexOf("y") !== -1) { - var drawY = Math.floor(crosshair.y) + adj; - ctx.moveTo(0, drawY); - ctx.lineTo(plot.width(), drawY); - } - if (crosshair.locked) { - if (crosshair.highlighted) ctx.fillStyle = 'orange'; - else ctx.fillStyle = c.color; - ctx.fillRect(Math.floor(crosshair.x) + adj - 4, Math.floor(crosshair.y) + adj - 4, 8, 8); - } - ctx.stroke(); - } - ctx.restore(); - }); - - plot.hooks.shutdown.push(function (plot, eventHolder) { - eventHolder.unbind("mouseout", onMouseOut); - eventHolder.unbind("mousemove", onMouseMove); - }); - } - - $.plot.plugins.push({ - init: init, - options: options, - name: 'crosshair', - version: '1.0' - }); -})(jQuery); diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.drawSeries.js b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.drawSeries.js deleted file mode 100644 index 472ad30..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.drawSeries.js +++ /dev/null @@ -1,604 +0,0 @@ -/** -## jquery.flot.drawSeries.js - -This plugin is used by flot for drawing lines, plots, bars or area. - -### Public methods -*/ - -(function($) { - "use strict"; - - function DrawSeries() { - function plotLine(datapoints, xoffset, yoffset, axisx, axisy, ctx) { - var points = datapoints.points, - ps = datapoints.pointsize, - prevx = null, - prevy = null; - var x1 = 0.0, - y1 = 0.0, - x2 = 0.0, - y2 = 0.0, - i = 0; - - ctx.beginPath(); - for (i = ps; i < points.length; i += ps) { - x1 = points[i - ps]; - y1 = points[i - ps + 1]; - x2 = points[i]; - y2 = points[i + 1]; - - if (x1 === null || x2 === null) { - continue; - } - - // clip with ymin - if (y1 <= y2 && y1 < axisy.min) { - if (y2 < axisy.min) { - // line segment is outside - continue; - } - // compute new intersection point - x1 = (axisy.min - y1) / (y2 - y1) * (x2 - x1) + x1; - y1 = axisy.min; - } else if (y2 <= y1 && y2 < axisy.min) { - if (y1 < axisy.min) { - continue; - } - - x2 = (axisy.min - y1) / (y2 - y1) * (x2 - x1) + x1; - y2 = axisy.min; - } - - // clip with ymax - if (y1 >= y2 && y1 > axisy.max) { - if (y2 > axisy.max) { - continue; - } - - x1 = (axisy.max - y1) / (y2 - y1) * (x2 - x1) + x1; - y1 = axisy.max; - } else if (y2 >= y1 && y2 > axisy.max) { - if (y1 > axisy.max) { - continue; - } - - x2 = (axisy.max - y1) / (y2 - y1) * (x2 - x1) + x1; - y2 = axisy.max; - } - - // clip with xmin - if (x1 <= x2 && x1 < axisx.min) { - if (x2 < axisx.min) { - continue; - } - - y1 = (axisx.min - x1) / (x2 - x1) * (y2 - y1) + y1; - x1 = axisx.min; - } else if (x2 <= x1 && x2 < axisx.min) { - if (x1 < axisx.min) { - continue; - } - - y2 = (axisx.min - x1) / (x2 - x1) * (y2 - y1) + y1; - x2 = axisx.min; - } - - // clip with xmax - if (x1 >= x2 && x1 > axisx.max) { - if (x2 > axisx.max) { - continue; - } - - y1 = (axisx.max - x1) / (x2 - x1) * (y2 - y1) + y1; - x1 = axisx.max; - } else if (x2 >= x1 && x2 > axisx.max) { - if (x1 > axisx.max) { - continue; - } - - y2 = (axisx.max - x1) / (x2 - x1) * (y2 - y1) + y1; - x2 = axisx.max; - } - - if (x1 !== prevx || y1 !== prevy) { - ctx.moveTo(axisx.p2c(x1) + xoffset, axisy.p2c(y1) + yoffset); - } - - prevx = x2; - prevy = y2; - ctx.lineTo(axisx.p2c(x2) + xoffset, axisy.p2c(y2) + yoffset); - } - 
ctx.stroke(); - } - - function plotLineArea(datapoints, axisx, axisy, fillTowards, ctx) { - var points = datapoints.points, - ps = datapoints.pointsize, - bottom = fillTowards > axisy.min ? Math.min(axisy.max, fillTowards) : axisy.min, - i = 0, - ypos = 1, - areaOpen = false, - segmentStart = 0, - segmentEnd = 0; - - // we process each segment in two turns, first forward - // direction to sketch out top, then once we hit the - // end we go backwards to sketch the bottom - while (true) { - if (ps > 0 && i > points.length + ps) { - break; - } - - i += ps; // ps is negative if going backwards - - var x1 = points[i - ps], - y1 = points[i - ps + ypos], - x2 = points[i], - y2 = points[i + ypos]; - - if (ps === -2) { - /* going backwards and no value for the bottom provided in the series*/ - y1 = y2 = bottom; - } - - if (areaOpen) { - if (ps > 0 && x1 != null && x2 == null) { - // at turning point - segmentEnd = i; - ps = -ps; - ypos = 2; - continue; - } - - if (ps < 0 && i === segmentStart + ps) { - // done with the reverse sweep - ctx.fill(); - areaOpen = false; - ps = -ps; - i = segmentStart = segmentEnd + ps; - continue; - } - } - - if (x1 == null || x2 == null) { - continue; - } - - // clip x values - - // clip with xmin - if (x1 <= x2 && x1 < axisx.min) { - if (x2 < axisx.min) { - continue; - } - - y1 = (axisx.min - x1) / (x2 - x1) * (y2 - y1) + y1; - x1 = axisx.min; - } else if (x2 <= x1 && x2 < axisx.min) { - if (x1 < axisx.min) { - continue; - } - - y2 = (axisx.min - x1) / (x2 - x1) * (y2 - y1) + y1; - x2 = axisx.min; - } - - // clip with xmax - if (x1 >= x2 && x1 > axisx.max) { - if (x2 > axisx.max) { - continue; - } - - y1 = (axisx.max - x1) / (x2 - x1) * (y2 - y1) + y1; - x1 = axisx.max; - } else if (x2 >= x1 && x2 > axisx.max) { - if (x1 > axisx.max) { - continue; - } - - y2 = (axisx.max - x1) / (x2 - x1) * (y2 - y1) + y1; - x2 = axisx.max; - } - - if (!areaOpen) { - // open area - ctx.beginPath(); - ctx.moveTo(axisx.p2c(x1), axisy.p2c(bottom)); - areaOpen = true; - } - - // now first check the case where both is outside - if (y1 >= axisy.max && y2 >= axisy.max) { - ctx.lineTo(axisx.p2c(x1), axisy.p2c(axisy.max)); - ctx.lineTo(axisx.p2c(x2), axisy.p2c(axisy.max)); - continue; - } else if (y1 <= axisy.min && y2 <= axisy.min) { - ctx.lineTo(axisx.p2c(x1), axisy.p2c(axisy.min)); - ctx.lineTo(axisx.p2c(x2), axisy.p2c(axisy.min)); - continue; - } - - // else it's a bit more complicated, there might - // be a flat maxed out rectangle first, then a - // triangular cutout or reverse; to find these - // keep track of the current x values - var x1old = x1, - x2old = x2; - - // clip the y values, without shortcutting, we - // go through all cases in turn - - // clip with ymin - if (y1 <= y2 && y1 < axisy.min && y2 >= axisy.min) { - x1 = (axisy.min - y1) / (y2 - y1) * (x2 - x1) + x1; - y1 = axisy.min; - } else if (y2 <= y1 && y2 < axisy.min && y1 >= axisy.min) { - x2 = (axisy.min - y1) / (y2 - y1) * (x2 - x1) + x1; - y2 = axisy.min; - } - - // clip with ymax - if (y1 >= y2 && y1 > axisy.max && y2 <= axisy.max) { - x1 = (axisy.max - y1) / (y2 - y1) * (x2 - x1) + x1; - y1 = axisy.max; - } else if (y2 >= y1 && y2 > axisy.max && y1 <= axisy.max) { - x2 = (axisy.max - y1) / (y2 - y1) * (x2 - x1) + x1; - y2 = axisy.max; - } - - // if the x value was changed we got a rectangle - // to fill - if (x1 !== x1old) { - ctx.lineTo(axisx.p2c(x1old), axisy.p2c(y1)); - // it goes to (x1, y1), but we fill that below - } - - // fill triangular section, this sometimes result - // in redundant points if (x1, y1) 
hasn't changed - // from previous line to, but we just ignore that - ctx.lineTo(axisx.p2c(x1), axisy.p2c(y1)); - ctx.lineTo(axisx.p2c(x2), axisy.p2c(y2)); - - // fill the other rectangle if it's there - if (x2 !== x2old) { - ctx.lineTo(axisx.p2c(x2), axisy.p2c(y2)); - ctx.lineTo(axisx.p2c(x2old), axisy.p2c(y2)); - } - } - } - - /** - - drawSeriesLines(series, ctx, plotOffset, plotWidth, plotHeight, drawSymbol, getColorOrGradient) - - This function is used for drawing lines or area fill. In case the series has line decimation function - attached, before starting to draw, as an optimization the points will first be decimated. - - The series parameter contains the series to be drawn on ctx context. The plotOffset, plotWidth and - plotHeight are the corresponding parameters of flot used to determine the drawing surface. - The function getColorOrGradient is used to compute the fill style of lines and area. - */ - function drawSeriesLines(series, ctx, plotOffset, plotWidth, plotHeight, drawSymbol, getColorOrGradient) { - ctx.save(); - ctx.translate(plotOffset.left, plotOffset.top); - ctx.lineJoin = "round"; - - if (series.lines.dashes && ctx.setLineDash) { - ctx.setLineDash(series.lines.dashes); - } - - var datapoints = { - format: series.datapoints.format, - points: series.datapoints.points, - pointsize: series.datapoints.pointsize - }; - - if (series.decimate) { - datapoints.points = series.decimate(series, series.xaxis.min, series.xaxis.max, plotWidth, series.yaxis.min, series.yaxis.max, plotHeight); - } - - var lw = series.lines.lineWidth; - - ctx.lineWidth = lw; - ctx.strokeStyle = series.color; - var fillStyle = getFillStyle(series.lines, series.color, 0, plotHeight, getColorOrGradient); - if (fillStyle) { - ctx.fillStyle = fillStyle; - plotLineArea(datapoints, series.xaxis, series.yaxis, series.lines.fillTowards || 0, ctx); - } - - if (lw > 0) { - plotLine(datapoints, 0, 0, series.xaxis, series.yaxis, ctx); - } - - ctx.restore(); - } - - /** - - drawSeriesPoints(series, ctx, plotOffset, plotWidth, plotHeight, drawSymbol, getColorOrGradient) - - This function is used for drawing points using a given symbol. In case the series has points decimation - function attached, before starting to draw, as an optimization the points will first be decimated. - - The series parameter contains the series to be drawn on ctx context. The plotOffset, plotWidth and - plotHeight are the corresponding parameters of flot used to determine the drawing surface. - The function drawSymbol is used to compute and draw the symbol chosen for the points. - */ - function drawSeriesPoints(series, ctx, plotOffset, plotWidth, plotHeight, drawSymbol, getColorOrGradient) { - function drawCircle(ctx, x, y, radius, shadow, fill) { - ctx.moveTo(x + radius, y); - ctx.arc(x, y, radius, 0, shadow ? 
Math.PI : Math.PI * 2, false); - } - drawCircle.fill = true; - function plotPoints(datapoints, radius, fill, offset, shadow, axisx, axisy, drawSymbolFn) { - var points = datapoints.points, - ps = datapoints.pointsize; - - ctx.beginPath(); - for (var i = 0; i < points.length; i += ps) { - var x = points[i], - y = points[i + 1]; - if (x == null || x < axisx.min || x > axisx.max || y < axisy.min || y > axisy.max) { - continue; - } - - x = axisx.p2c(x); - y = axisy.p2c(y) + offset; - - drawSymbolFn(ctx, x, y, radius, shadow, fill); - } - if (drawSymbolFn.fill && !shadow) { - ctx.fill(); - } - ctx.stroke(); - } - - ctx.save(); - ctx.translate(plotOffset.left, plotOffset.top); - - var datapoints = { - format: series.datapoints.format, - points: series.datapoints.points, - pointsize: series.datapoints.pointsize - }; - - if (series.decimatePoints) { - datapoints.points = series.decimatePoints(series, series.xaxis.min, series.xaxis.max, plotWidth, series.yaxis.min, series.yaxis.max, plotHeight); - } - - var lw = series.points.lineWidth, - radius = series.points.radius, - symbol = series.points.symbol, - drawSymbolFn; - - if (symbol === 'circle') { - drawSymbolFn = drawCircle; - } else if (typeof symbol === 'string' && drawSymbol && drawSymbol[symbol]) { - drawSymbolFn = drawSymbol[symbol]; - } else if (typeof drawSymbol === 'function') { - drawSymbolFn = drawSymbol; - } - - // If the user sets the line width to 0, we change it to a very - // small value. A line width of 0 seems to force the default of 1. - - if (lw === 0) { - lw = 0.0001; - } - - ctx.lineWidth = lw; - ctx.fillStyle = getFillStyle(series.points, series.color, null, null, getColorOrGradient); - ctx.strokeStyle = series.color; - plotPoints(datapoints, radius, - true, 0, false, - series.xaxis, series.yaxis, drawSymbolFn); - ctx.restore(); - } - - function drawBar(x, y, b, barLeft, barRight, fillStyleCallback, axisx, axisy, c, horizontal, lineWidth) { - var left = x + barLeft, - right = x + barRight, - bottom = b, top = y, - drawLeft, drawRight, drawTop, drawBottom = false, - tmp; - - drawLeft = drawRight = drawTop = true; - - // in horizontal mode, we start the bar from the left - // instead of from the bottom so it appears to be - // horizontal rather than vertical - if (horizontal) { - drawBottom = drawRight = drawTop = true; - drawLeft = false; - left = b; - right = x; - top = y + barLeft; - bottom = y + barRight; - - // account for negative bars - if (right < left) { - tmp = right; - right = left; - left = tmp; - drawLeft = true; - drawRight = false; - } - } - else { - drawLeft = drawRight = drawTop = true; - drawBottom = false; - left = x + barLeft; - right = x + barRight; - bottom = b; - top = y; - - // account for negative bars - if (top < bottom) { - tmp = top; - top = bottom; - bottom = tmp; - drawBottom = true; - drawTop = false; - } - } - - // clip - if (right < axisx.min || left > axisx.max || - top < axisy.min || bottom > axisy.max) { - return; - } - - if (left < axisx.min) { - left = axisx.min; - drawLeft = false; - } - - if (right > axisx.max) { - right = axisx.max; - drawRight = false; - } - - if (bottom < axisy.min) { - bottom = axisy.min; - drawBottom = false; - } - - if (top > axisy.max) { - top = axisy.max; - drawTop = false; - } - - left = axisx.p2c(left); - bottom = axisy.p2c(bottom); - right = axisx.p2c(right); - top = axisy.p2c(top); - - // fill the bar - if (fillStyleCallback) { - c.fillStyle = fillStyleCallback(bottom, top); - c.fillRect(left, top, right - left, bottom - top) - } - - // draw outline - if 
(lineWidth > 0 && (drawLeft || drawRight || drawTop || drawBottom)) { - c.beginPath(); - - // FIXME: inline moveTo is buggy with excanvas - c.moveTo(left, bottom); - if (drawLeft) { - c.lineTo(left, top); - } else { - c.moveTo(left, top); - } - - if (drawTop) { - c.lineTo(right, top); - } else { - c.moveTo(right, top); - } - - if (drawRight) { - c.lineTo(right, bottom); - } else { - c.moveTo(right, bottom); - } - - if (drawBottom) { - c.lineTo(left, bottom); - } else { - c.moveTo(left, bottom); - } - - c.stroke(); - } - } - - /** - - drawSeriesBars(series, ctx, plotOffset, plotWidth, plotHeight, drawSymbol, getColorOrGradient) - - This function is used for drawing series represented as bars. In case the series has decimation - function attached, before starting to draw, as an optimization the points will first be decimated. - - The series parameter contains the series to be drawn on ctx context. The plotOffset, plotWidth and - plotHeight are the corresponding parameters of flot used to determine the drawing surface. - The function getColorOrGradient is used to compute the fill style of bars. - */ - function drawSeriesBars(series, ctx, plotOffset, plotWidth, plotHeight, drawSymbol, getColorOrGradient) { - function plotBars(datapoints, barLeft, barRight, fillStyleCallback, axisx, axisy) { - var points = datapoints.points, - ps = datapoints.pointsize, - fillTowards = series.bars.fillTowards || 0, - calculatedBottom = fillTowards > axisy.min ? Math.min(axisy.max, fillTowards) : axisy.min; - - for (var i = 0; i < points.length; i += ps) { - if (points[i] == null) { - continue; - } - - // Use third point as bottom if pointsize is 3 - var bottom = ps === 3 ? points[i + 2] : calculatedBottom; - drawBar(points[i], points[i + 1], bottom, barLeft, barRight, fillStyleCallback, axisx, axisy, ctx, series.bars.horizontal, series.bars.lineWidth); - } - } - - ctx.save(); - ctx.translate(plotOffset.left, plotOffset.top); - - var datapoints = { - format: series.datapoints.format, - points: series.datapoints.points, - pointsize: series.datapoints.pointsize - }; - - if (series.decimate) { - datapoints.points = series.decimate(series, series.xaxis.min, series.xaxis.max, plotWidth); - } - - ctx.lineWidth = series.bars.lineWidth; - ctx.strokeStyle = series.color; - - var barLeft; - var barWidth = series.bars.barWidth[0] || series.bars.barWidth; - switch (series.bars.align) { - case "left": - barLeft = 0; - break; - case "right": - barLeft = -barWidth; - break; - default: - barLeft = -barWidth / 2; - } - - var fillStyleCallback = series.bars.fill ? function(bottom, top) { - return getFillStyle(series.bars, series.color, bottom, top, getColorOrGradient); - } : null; - - plotBars(datapoints, barLeft, barLeft + barWidth, fillStyleCallback, series.xaxis, series.yaxis); - ctx.restore(); - } - - function getFillStyle(filloptions, seriesColor, bottom, top, getColorOrGradient) { - var fill = filloptions.fill; - if (!fill) { - return null; - } - - if (filloptions.fillColor) { - return getColorOrGradient(filloptions.fillColor, bottom, top, seriesColor); - } - - var c = $.color.parse(seriesColor); - c.a = typeof fill === "number" ? 
fill : 0.4; - c.normalize(); - return c.toString(); - } - - this.drawSeriesLines = drawSeriesLines; - this.drawSeriesPoints = drawSeriesPoints; - this.drawSeriesBars = drawSeriesBars; - this.drawBar = drawBar; - }; - - $.plot.drawSeries = new DrawSeries(); -})(jQuery); diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.errorbars.js b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.errorbars.js deleted file mode 100644 index 956562e..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.errorbars.js +++ /dev/null @@ -1,375 +0,0 @@ -/* Flot plugin for plotting error bars. - -Copyright (c) 2007-2014 IOLA and Ole Laursen. -Licensed under the MIT license. - -Error bars are used to show standard deviation and other statistical -properties in a plot. - -* Created by Rui Pereira - rui (dot) pereira (at) gmail (dot) com - -This plugin allows you to plot error-bars over points. Set "errorbars" inside -the points series to the axis name over which there will be error values in -your data array (*even* if you do not intend to plot them later, by setting -"show: null" on xerr/yerr). - -The plugin supports these options: - - series: { - points: { - errorbars: "x" or "y" or "xy", - xerr: { - show: null/false or true, - asymmetric: null/false or true, - upperCap: null or "-" or function, - lowerCap: null or "-" or function, - color: null or color, - radius: null or number - }, - yerr: { same options as xerr } - } - } - -Each data point array is expected to be of the type: - - "x" [ x, y, xerr ] - "y" [ x, y, yerr ] - "xy" [ x, y, xerr, yerr ] - -Where xerr becomes xerr_lower,xerr_upper for the asymmetric error case, and -equivalently for yerr. Eg., a datapoint for the "xy" case with symmetric -error-bars on X and asymmetric on Y would be: - - [ x, y, xerr, yerr_lower, yerr_upper ] - -By default no end caps are drawn. Setting upperCap and/or lowerCap to "-" will -draw a small cap perpendicular to the error bar. They can also be set to a -user-defined drawing function, with (ctx, x, y, radius) as parameters, as eg. - - function drawSemiCircle( ctx, x, y, radius ) { - ctx.beginPath(); - ctx.arc( x, y, radius, 0, Math.PI, false ); - ctx.moveTo( x - radius, y ); - ctx.lineTo( x + radius, y ); - ctx.stroke(); - } - -Color and radius both default to the same ones of the points series if not -set. The independent radius parameter on xerr/yerr is useful for the case when -we may want to add error-bars to a line, without showing the interconnecting -points (with radius: 0), and still showing end caps on the error-bars. -shadowSize and lineWidth are derived as well from the points series. 
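A minimal sketch of the symmetric "y" case described above (assuming jQuery, Flot, and this plugin are loaded; "#placeholder" and the data are hypothetical):

```
// Each point is [x, y, yerr]; end caps are drawn with the "-" style.
var data = [[1, 10, 1.5], [2, 14, 2.0], [3, 9, 1.2]];

$.plot("#placeholder", [{
    data: data,
    points: {
        show: true,
        radius: 3,
        errorbars: "y",
        yerr: { show: true, upperCap: "-", lowerCap: "-" }
    }
}]);
```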
- -*/ - -(function ($) { - var options = { - series: { - points: { - errorbars: null, //should be 'x', 'y' or 'xy' - xerr: {err: 'x', show: null, asymmetric: null, upperCap: null, lowerCap: null, color: null, radius: null}, - yerr: {err: 'y', show: null, asymmetric: null, upperCap: null, lowerCap: null, color: null, radius: null} - } - } - }; - - function processRawData(plot, series, data, datapoints) { - if (!series.points.errorbars) { - return; - } - - // x,y values - var format = [ - { x: true, number: true, required: true }, - { y: true, number: true, required: true } - ]; - - var errors = series.points.errorbars; - // error bars - first X then Y - if (errors === 'x' || errors === 'xy') { - // lower / upper error - if (series.points.xerr.asymmetric) { - format.push({ x: true, number: true, required: true }); - format.push({ x: true, number: true, required: true }); - } else { - format.push({ x: true, number: true, required: true }); - } - } - if (errors === 'y' || errors === 'xy') { - // lower / upper error - if (series.points.yerr.asymmetric) { - format.push({ y: true, number: true, required: true }); - format.push({ y: true, number: true, required: true }); - } else { - format.push({ y: true, number: true, required: true }); - } - } - datapoints.format = format; - } - - function parseErrors(series, i) { - var points = series.datapoints.points; - - // read errors from points array - var exl = null, - exu = null, - eyl = null, - eyu = null; - var xerr = series.points.xerr, - yerr = series.points.yerr; - - var eb = series.points.errorbars; - // error bars - first X - if (eb === 'x' || eb === 'xy') { - if (xerr.asymmetric) { - exl = points[i + 2]; - exu = points[i + 3]; - if (eb === 'xy') { - if (yerr.asymmetric) { - eyl = points[i + 4]; - eyu = points[i + 5]; - } else { - eyl = points[i + 4]; - } - } - } else { - exl = points[i + 2]; - if (eb === 'xy') { - if (yerr.asymmetric) { - eyl = points[i + 3]; - eyu = points[i + 4]; - } else { - eyl = points[i + 3]; - } - } - } - // only Y - } else { - if (eb === 'y') { - if (yerr.asymmetric) { - eyl = points[i + 2]; - eyu = points[i + 3]; - } else { - eyl = points[i + 2]; - } - } - } - - // symmetric errors? - if (exu == null) exu = exl; - if (eyu == null) eyu = eyl; - - var errRanges = [exl, exu, eyl, eyu]; - // nullify if not showing - if (!xerr.show) { - errRanges[0] = null; - errRanges[1] = null; - } - if (!yerr.show) { - errRanges[2] = null; - errRanges[3] = null; - } - return errRanges; - } - - function drawSeriesErrors(plot, ctx, s) { - var points = s.datapoints.points, - ps = s.datapoints.pointsize, - ax = [s.xaxis, s.yaxis], - radius = s.points.radius, - err = [s.points.xerr, s.points.yerr], - tmp; - - //sanity check, in case some inverted axis hack is applied to flot - var invertX = false; - if (ax[0].p2c(ax[0].max) < ax[0].p2c(ax[0].min)) { - invertX = true; - tmp = err[0].lowerCap; - err[0].lowerCap = err[0].upperCap; - err[0].upperCap = tmp; - } - - var invertY = false; - if (ax[1].p2c(ax[1].min) < ax[1].p2c(ax[1].max)) { - invertY = true; - tmp = err[1].lowerCap; - err[1].lowerCap = err[1].upperCap; - err[1].upperCap = tmp; - } - - for (var i = 0; i < s.datapoints.points.length; i += ps) { - //parse - var errRanges = parseErrors(s, i); - - //cycle xerr & yerr - for (var e = 0; e < err.length; e++) { - var minmax = [ax[e].min, ax[e].max]; - - //draw this error? 
- if (errRanges[e * err.length]) { - //data coordinates - var x = points[i], - y = points[i + 1]; - - //errorbar ranges - var upper = [x, y][e] + errRanges[e * err.length + 1], - lower = [x, y][e] - errRanges[e * err.length]; - - //points outside of the canvas - if (err[e].err === 'x') { - if (y > ax[1].max || y < ax[1].min || upper < ax[0].min || lower > ax[0].max) { - continue; - } - } - - if (err[e].err === 'y') { - if (x > ax[0].max || x < ax[0].min || upper < ax[1].min || lower > ax[1].max) { - continue; - } - } - - // prevent errorbars getting out of the canvas - var drawUpper = true, - drawLower = true; - - if (upper > minmax[1]) { - drawUpper = false; - upper = minmax[1]; - } - if (lower < minmax[0]) { - drawLower = false; - lower = minmax[0]; - } - - //sanity check, in case some inverted axis hack is applied to flot - if ((err[e].err === 'x' && invertX) || (err[e].err === 'y' && invertY)) { - //swap coordinates - tmp = lower; - lower = upper; - upper = tmp; - tmp = drawLower; - drawLower = drawUpper; - drawUpper = tmp; - tmp = minmax[0]; - minmax[0] = minmax[1]; - minmax[1] = tmp; - } - - // convert to pixels - x = ax[0].p2c(x); - y = ax[1].p2c(y); - upper = ax[e].p2c(upper); - lower = ax[e].p2c(lower); - minmax[0] = ax[e].p2c(minmax[0]); - minmax[1] = ax[e].p2c(minmax[1]); - - //same style as points by default - var lw = err[e].lineWidth ? err[e].lineWidth : s.points.lineWidth, - sw = s.points.shadowSize != null ? s.points.shadowSize : s.shadowSize; - - //shadow as for points - if (lw > 0 && sw > 0) { - var w = sw / 2; - ctx.lineWidth = w; - ctx.strokeStyle = "rgba(0,0,0,0.1)"; - drawError(ctx, err[e], x, y, upper, lower, drawUpper, drawLower, radius, w + w / 2, minmax); - - ctx.strokeStyle = "rgba(0,0,0,0.2)"; - drawError(ctx, err[e], x, y, upper, lower, drawUpper, drawLower, radius, w / 2, minmax); - } - - ctx.strokeStyle = err[e].color - ? err[e].color - : s.color; - ctx.lineWidth = lw; - //draw it - drawError(ctx, err[e], x, y, upper, lower, drawUpper, drawLower, radius, 0, minmax); - } - } - } - } - - function drawError(ctx, err, x, y, upper, lower, drawUpper, drawLower, radius, offset, minmax) { - //shadow offset - y += offset; - upper += offset; - lower += offset; - - // error bar - avoid plotting over circles - if (err.err === 'x') { - if (upper > x + radius) drawPath(ctx, [[upper, y], [Math.max(x + radius, minmax[0]), y]]); - else drawUpper = false; - - if (lower < x - radius) drawPath(ctx, [[Math.min(x - radius, minmax[1]), y], [lower, y]]); - else drawLower = false; - } else { - if (upper < y - radius) drawPath(ctx, [[x, upper], [x, Math.min(y - radius, minmax[0])]]); - else drawUpper = false; - - if (lower > y + radius) drawPath(ctx, [[x, Math.max(y + radius, minmax[1])], [x, lower]]); - else drawLower = false; - } - - //internal radius value in errorbar, allows to plot radius 0 points and still keep proper sized caps - //this is a way to get errorbars on lines without visible connecting dots - radius = err.radius != null - ? 
err.radius - : radius; - - // upper cap - if (drawUpper) { - if (err.upperCap === '-') { - if (err.err === 'x') drawPath(ctx, [[upper, y - radius], [upper, y + radius]]); - else drawPath(ctx, [[x - radius, upper], [x + radius, upper]]); - } else if ($.isFunction(err.upperCap)) { - if (err.err === 'x') err.upperCap(ctx, upper, y, radius); - else err.upperCap(ctx, x, upper, radius); - } - } - // lower cap - if (drawLower) { - if (err.lowerCap === '-') { - if (err.err === 'x') drawPath(ctx, [[lower, y - radius], [lower, y + radius]]); - else drawPath(ctx, [[x - radius, lower], [x + radius, lower]]); - } else if ($.isFunction(err.lowerCap)) { - if (err.err === 'x') err.lowerCap(ctx, lower, y, radius); - else err.lowerCap(ctx, x, lower, radius); - } - } - } - - function drawPath(ctx, pts) { - ctx.beginPath(); - ctx.moveTo(pts[0][0], pts[0][1]); - for (var p = 1; p < pts.length; p++) { - ctx.lineTo(pts[p][0], pts[p][1]); - } - - ctx.stroke(); - } - - function draw(plot, ctx) { - var plotOffset = plot.getPlotOffset(); - - ctx.save(); - ctx.translate(plotOffset.left, plotOffset.top); - $.each(plot.getData(), function (i, s) { - if (s.points.errorbars && (s.points.xerr.show || s.points.yerr.show)) { - drawSeriesErrors(plot, ctx, s); - } - }); - ctx.restore(); - } - - function init(plot) { - plot.hooks.processRawData.push(processRawData); - plot.hooks.draw.push(draw); - } - - $.plot.plugins.push({ - init: init, - options: options, - name: 'errorbars', - version: '1.0' - }); -})(jQuery); diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.fillbetween.js b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.fillbetween.js deleted file mode 100644 index 96cb292..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.fillbetween.js +++ /dev/null @@ -1,254 +0,0 @@ -/* Flot plugin for computing bottoms for filled line and bar charts. - -Copyright (c) 2007-2014 IOLA and Ole Laursen. -Licensed under the MIT license. - -The case: you've got two series that you want to fill the area between. In Flot -terms, you need to use one as the fill bottom of the other. You can specify the -bottom of each data point as the third coordinate manually, or you can use this -plugin to compute it for you. - -In order to name the other series, you need to give it an id, like this: - - var dataset = [ - { data: [ ... ], id: "foo" } , // use default bottom - { data: [ ... ], fillBetween: "foo" }, // use first dataset as bottom - ]; - - $.plot($("#placeholder"), dataset, { lines: { show: true, fill: true }}); - -As a convenience, if the id given is a number that doesn't appear as an id in -the series, it is interpreted as the index in the array instead (so fillBetween: -0 can also mean the first series). - -Internally, the plugin modifies the datapoints in each series. For line series, -extra data points might be inserted through interpolation. Note that at points -where the bottom line is not defined (due to a null point or start/end of line), -the current line will show a gap too. The algorithm comes from the -jquery.flot.stack.js plugin, possibly some code could be shared. 
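Beyond the example above, a common use is shading the band between two series; a minimal sketch under the same assumptions (hypothetical data; the numeric `fill` value sets the fill opacity):

```
// The band series fills down to the series whose id is "lower".
var lower = { id: "lower", data: [[0, 1], [1, 2], [2, 1.5]], lines: { show: true } };
var upper = { fillBetween: "lower", data: [[0, 2], [1, 3], [2, 2.5]],
              lines: { show: true, fill: 0.3 } };

$.plot($("#placeholder"), [lower, upper]);
```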
- -*/ - -(function ($) { - var options = { - series: { - fillBetween: null // or number - } - }; - - function init(plot) { - function findBottomSeries(s, allseries) { - var i; - - for (i = 0; i < allseries.length; ++i) { - if (allseries[ i ].id === s.fillBetween) { - return allseries[ i ]; - } - } - - if (typeof s.fillBetween === "number") { - if (s.fillBetween < 0 || s.fillBetween >= allseries.length) { - return null; - } - return allseries[ s.fillBetween ]; - } - - return null; - } - - function computeFormat(plot, s, data, datapoints) { - if (s.fillBetween == null) { - return; - } - - format = datapoints.format; - var plotHasId = function(id) { - var plotData = plot.getData(); - for (i = 0; i < plotData.length; i++) { - if (plotData[i].id === id) { - return true; - } - } - - return false; - } - - if (!format) { - format = []; - - format.push({ - x: true, - number: true, - computeRange: s.xaxis.options.autoScale !== 'none', - required: true - }); - format.push({ - y: true, - number: true, - computeRange: s.yaxis.options.autoScale !== 'none', - required: true - }); - - if (s.fillBetween !== undefined && s.fillBetween !== '' && plotHasId(s.fillBetween) && s.fillBetween !== s.id) { - format.push({ - x: false, - y: true, - number: true, - required: false, - computeRange: s.yaxis.options.autoScale !== 'none', - defaultValue: 0 - }); - } - - datapoints.format = format; - } - } - - function computeFillBottoms(plot, s, datapoints) { - if (s.fillBetween == null) { - return; - } - - var other = findBottomSeries(s, plot.getData()); - - if (!other) { - return; - } - - var ps = datapoints.pointsize, - points = datapoints.points, - otherps = other.datapoints.pointsize, - otherpoints = other.datapoints.points, - newpoints = [], - px, py, intery, qx, qy, bottom, - withlines = s.lines.show, - withbottom = ps > 2 && datapoints.format[2].y, - withsteps = withlines && s.lines.steps, - fromgap = true, - i = 0, - j = 0, - l, m; - - while (true) { - if (i >= points.length) { - break; - } - - l = newpoints.length; - - if (points[ i ] == null) { - // copy gaps - for (m = 0; m < ps; ++m) { - newpoints.push(points[ i + m ]); - } - - i += ps; - } else if (j >= otherpoints.length) { - // for lines, we can't use the rest of the points - if (!withlines) { - for (m = 0; m < ps; ++m) { - newpoints.push(points[ i + m ]); - } - } - - i += ps; - } else if (otherpoints[ j ] == null) { - // oops, got a gap - for (m = 0; m < ps; ++m) { - newpoints.push(null); - } - - fromgap = true; - j += otherps; - } else { - // cases where we actually got two points - px = points[ i ]; - py = points[ i + 1 ]; - qx = otherpoints[ j ]; - qy = otherpoints[ j + 1 ]; - bottom = 0; - - if (px === qx) { - for (m = 0; m < ps; ++m) { - newpoints.push(points[ i + m ]); - } - - //newpoints[ l + 1 ] += qy; - bottom = qy; - - i += ps; - j += otherps; - } else if (px > qx) { - // we got past point below, might need to - // insert interpolated extra point - - if (withlines && i > 0 && points[ i - ps ] != null) { - intery = py + (points[ i - ps + 1 ] - py) * (qx - px) / (points[ i - ps ] - px); - newpoints.push(qx); - newpoints.push(intery); - for (m = 2; m < ps; ++m) { - newpoints.push(points[ i + m ]); - } - bottom = qy; - } - - j += otherps; - } else { - // px < qx - // if we come from a gap, we just skip this point - - if (fromgap && withlines) { - i += ps; - continue; - } - - for (m = 0; m < ps; ++m) { - newpoints.push(points[ i + m ]); - } - - // we might be able to interpolate a point below, - // this can give us a better y - - if (withlines && j > 
0 && otherpoints[ j - otherps ] != null) { - bottom = qy + (otherpoints[ j - otherps + 1 ] - qy) * (px - qx) / (otherpoints[ j - otherps ] - qx); - } - - //newpoints[l + 1] += bottom; - - i += ps; - } - - fromgap = false; - - if (l !== newpoints.length && withbottom) { - newpoints[ l + 2 ] = bottom; - } - } - - // maintain the line steps invariant - - if (withsteps && l !== newpoints.length && l > 0 && - newpoints[ l ] !== null && - newpoints[ l ] !== newpoints[ l - ps ] && - newpoints[ l + 1 ] !== newpoints[ l - ps + 1 ]) { - for (m = 0; m < ps; ++m) { - newpoints[ l + ps + m ] = newpoints[ l + m ]; - } - newpoints[ l + 1 ] = newpoints[ l - ps + 1 ]; - } - } - - datapoints.points = newpoints; - } - - plot.hooks.processRawData.push(computeFormat); - plot.hooks.processDatapoints.push(computeFillBottoms); - } - - $.plot.plugins.push({ - init: init, - options: options, - name: "fillbetween", - version: "1.0" - }); -})(jQuery); diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.flatdata.js b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.flatdata.js deleted file mode 100644 index b91d168..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.flatdata.js +++ /dev/null @@ -1,47 +0,0 @@ -/* Support for flat 1D data series. - -A 1D flat data series is a data series in the form of a regular 1D array. The -main reason for using a flat data series is that it performs better, consumes -less memory and generates less garbage collection than the regular flot format. - -Example: - - plot.setData([[[0,0], [1,1], [2,2], [3,3]]]); // regular flot format - plot.setData([{flatdata: true, data: [0, 1, 2, 3]}]); // flatdata format - -Set series.flatdata to true to enable this plugin. - -You can use series.start to specify the starting index of the series (default is 0) -You can use series.step to specify the interval between consecutive indexes of the series (default is 1) -*/ - -/* global jQuery*/ - -(function ($) { - 'use strict'; - - function process1DRawData(plot, series, data, datapoints) { - if (series.flatdata === true) { - var start = series.start || 0; - var step = typeof series.step === 'number' ? series.step : 1; - datapoints.pointsize = 2; - for (var i = 0, j = 0; i < data.length; i++, j += 2) { - datapoints.points[j] = start + (i * step); - datapoints.points[j + 1] = data[i]; - } - if (datapoints.points !== undefined) { - datapoints.points.length = data.length * 2; - } else { - datapoints.points = []; - } - } - } - - $.plot.plugins.push({ - init: function(plot) { - plot.hooks.processRawData.push(process1DRawData); - }, - name: 'flatdata', - version: '0.0.2' - }); -})(jQuery); diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.hover.js b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.hover.js deleted file mode 100644 index 4f0f08e..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.hover.js +++ /dev/null @@ -1,346 +0,0 @@ -/* global jQuery */ - -/** -## jquery.flot.hover.js - -This plugin is used for mouse hover and tap on a point of plot series. -It supports the following options: -```js -grid: { - hoverable: false, //to trigger plothover event on mouse hover or tap on a point - clickable: false //to trigger plotclick event on mouse hover -} -``` - -It listens to native mouse move event or click, as well as artificial generated -tap and touchevent. 
- -When the mouse is over a point or a tap on a point is performed, that point or -the corresponding bar will be highlighted and a "plothover" event will be generated. - -A custom "touchevent" is triggered when any touch interaction is made. The hover plugin -handles these events by unhighlighting all of the previously highlighted points and generating -a "plothovercleanup" event to notify any code that is handling plothover (for example, to clean up -the tooltip from webcharts). -*/ - -(function($) { - 'use strict'; - - var options = { - grid: { - hoverable: false, - clickable: false - } - }; - - var browser = $.plot.browser; - - function init(plot) { - plot.hooks.processOptions.push(initHover); - } - - function initHover(plot, options) { - var highlights = []; - - var eventType = { - click: 'click', - hover: 'hover' - }; - - var lastMouseMoveEvent = plot.getPlaceholder()[0].lastMouseMoveEvent; - - plot.highlight = highlight; - plot.unhighlight = unhighlight; - - function bindEvents(plot, eventHolder) { - var o = plot.getOptions(); - - if (o.grid.hoverable || o.grid.clickable) { - eventHolder[0].addEventListener('touchevent', triggerCleanupEvent, false); - eventHolder[0].addEventListener('tap', tap.generatePlothoverEvent, false); - } - - if (options.grid.clickable) { - eventHolder.click(onClick); - } - - if (options.grid.hoverable) { - eventHolder.mousemove(onMouseMove); - - // Use bind, rather than .mouseleave, because we officially - // still support jQuery 1.2.6, which doesn't define a shortcut - // for mouseenter or mouseleave. This was a bug/oversight that - // was fixed somewhere around 1.3.x. We can return to using - // .mouseleave when we drop support for 1.2.6. - - eventHolder.bind("mouseleave", onMouseLeave); - } - } - - function shutdown(plot, eventHolder) { - eventHolder[0].removeEventListener('tap', tap.generatePlothoverEvent); - eventHolder[0].removeEventListener('tap', triggerCleanupEvent); - eventHolder.unbind("mousemove", onMouseMove); - eventHolder.unbind("mouseleave", onMouseLeave); - eventHolder.unbind("click", onClick); - highlights = []; - } - - function doTriggerClickHoverEvent(event, eventType, searchDistance) { - var series = plot.getData(); - if (event !== undefined - && series.length > 0 - && series[0].xaxis.c2p !== undefined - && series[0].yaxis.c2p !== undefined) { - var eventToTrigger = "plot" + eventType; - var seriesFlag = eventType + "able"; - triggerClickHoverEvent(eventToTrigger, event, - function(i) { - return series[i][seriesFlag] !== false; - }, searchDistance); - } - } - - var tap = { - generatePlothoverEvent: function (e) { - var o = plot.getOptions(), - newEvent = new CustomEvent('mouseevent'); - - //transform from touch event to mouse event format - newEvent.pageX = e.detail.changedTouches[0].pageX; - newEvent.pageY = e.detail.changedTouches[0].pageY; - newEvent.clientX = e.detail.changedTouches[0].clientX; - newEvent.clientY = e.detail.changedTouches[0].clientY; - - if (o.grid.hoverable) { - doTriggerClickHoverEvent(newEvent, eventType.hover, 30); - } - return false; - } - }; - - if (options.grid.hoverable || options.grid.clickable) { - plot.hooks.bindEvents.push(bindEvents); - plot.hooks.shutdown.push(shutdown); - plot.hooks.drawOverlay.push(drawOverlay); - plot.hooks.processRawData.push(processRawData); - } - - function onMouseMove(e) { - lastMouseMoveEvent = e; - plot.getPlaceholder()[0].lastMouseMoveEvent = e; - doTriggerClickHoverEvent(e, eventType.hover); - } - - function onMouseLeave(e) { - lastMouseMoveEvent = undefined; -
plot.getPlaceholder()[0].lastMouseMoveEvent = undefined; - triggerClickHoverEvent("plothover", e, - function(i) { - return false; - }); - } - - function onClick(e) { - doTriggerClickHoverEvent(e, eventType.click); - } - - function triggerCleanupEvent() { - plot.unhighlight(); - plot.getPlaceholder().trigger('plothovercleanup'); - } - - // trigger click or hover event (they send the same parameters - // so we share their code) - function triggerClickHoverEvent(eventname, event, seriesFilter, searchDistance) { - var options = plot.getOptions(), - offset = plot.offset(), - page = browser.getPageXY(event), - canvasX = page.X - offset.left, - canvasY = page.Y - offset.top, - pos = plot.c2p({ - left: canvasX, - top: canvasY - }), - distance = searchDistance !== undefined ? searchDistance : options.grid.mouseActiveRadius; - - pos.pageX = page.X; - pos.pageY = page.Y; - - var item = plot.findNearbyItem(canvasX, canvasY, seriesFilter, distance); - - if (item) { - // fill in mouse pos for any listeners out there - item.pageX = parseInt(item.series.xaxis.p2c(item.datapoint[0]) + offset.left, 10); - item.pageY = parseInt(item.series.yaxis.p2c(item.datapoint[1]) + offset.top, 10); - } - - if (options.grid.autoHighlight) { - // clear auto-highlights - for (var i = 0; i < highlights.length; ++i) { - var h = highlights[i]; - if ((h.auto === eventname && - !(item && h.series === item.series && - h.point[0] === item.datapoint[0] && - h.point[1] === item.datapoint[1])) || !item) { - unhighlight(h.series, h.point); - } - } - - if (item) { - highlight(item.series, item.datapoint, eventname); - } - } - - plot.getPlaceholder().trigger(eventname, [pos, item]); - } - - function highlight(s, point, auto) { - if (typeof s === "number") { - s = plot.getData()[s]; - } - - if (typeof point === "number") { - var ps = s.datapoints.pointsize; - point = s.datapoints.points.slice(ps * point, ps * (point + 1)); - } - - var i = indexOfHighlight(s, point); - if (i === -1) { - highlights.push({ - series: s, - point: point, - auto: auto - }); - - plot.triggerRedrawOverlay(); - } else if (!auto) { - highlights[i].auto = false; - } - } - - function unhighlight(s, point) { - if (s == null && point == null) { - highlights = []; - plot.triggerRedrawOverlay(); - return; - } - - if (typeof s === "number") { - s = plot.getData()[s]; - } - - if (typeof point === "number") { - var ps = s.datapoints.pointsize; - point = s.datapoints.points.slice(ps * point, ps * (point + 1)); - } - - var i = indexOfHighlight(s, point); - if (i !== -1) { - highlights.splice(i, 1); - - plot.triggerRedrawOverlay(); - } - } - - function indexOfHighlight(s, p) { - for (var i = 0; i < highlights.length; ++i) { - var h = highlights[i]; - if (h.series === s && - h.point[0] === p[0] && - h.point[1] === p[1]) { - return i; - } - } - - return -1; - } - - function processRawData() { - triggerCleanupEvent(); - doTriggerClickHoverEvent(lastMouseMoveEvent, eventType.hover); - } - - function drawOverlay(plot, octx, overlay) { - var plotOffset = plot.getPlotOffset(), - i, hi; - - octx.save(); - octx.translate(plotOffset.left, plotOffset.top); - for (i = 0; i < highlights.length; ++i) { - hi = highlights[i]; - - if (hi.series.bars.show) drawBarHighlight(hi.series, hi.point, octx); - else drawPointHighlight(hi.series, hi.point, octx, plot); - } - octx.restore(); - } - } - - function drawPointHighlight(series, point, octx, plot) { - var x = point[0], - y = point[1], - axisx = series.xaxis, - axisy = series.yaxis, - highlightColor = (typeof series.highlightColor === "string") 
? series.highlightColor : $.color.parse(series.color).scale('a', 0.5).toString(); - - if (x < axisx.min || x > axisx.max || y < axisy.min || y > axisy.max) { - return; - } - - var pointRadius = series.points.radius + series.points.lineWidth / 2; - octx.lineWidth = pointRadius; - octx.strokeStyle = highlightColor; - var radius = 1.5 * pointRadius; - x = axisx.p2c(x); - y = axisy.p2c(y); - - octx.beginPath(); - var symbol = series.points.symbol; - if (symbol === 'circle') { - octx.arc(x, y, radius, 0, 2 * Math.PI, false); - } else if (typeof symbol === 'string' && plot.drawSymbol && plot.drawSymbol[symbol]) { - plot.drawSymbol[symbol](octx, x, y, radius, false); - } - - octx.closePath(); - octx.stroke(); - } - - function drawBarHighlight(series, point, octx) { - var highlightColor = (typeof series.highlightColor === "string") ? series.highlightColor : $.color.parse(series.color).scale('a', 0.5).toString(), - fillStyle = highlightColor, - barLeft; - - var barWidth = series.bars.barWidth[0] || series.bars.barWidth; - switch (series.bars.align) { - case "left": - barLeft = 0; - break; - case "right": - barLeft = -barWidth; - break; - default: - barLeft = -barWidth / 2; - } - - octx.lineWidth = series.bars.lineWidth; - octx.strokeStyle = highlightColor; - - var fillTowards = series.bars.fillTowards || 0, - bottom = fillTowards > series.yaxis.min ? Math.min(series.yaxis.max, fillTowards) : series.yaxis.min; - - $.plot.drawSeries.drawBar(point[0], point[1], point[2] || bottom, barLeft, barLeft + barWidth, - function() { - return fillStyle; - }, series.xaxis, series.yaxis, octx, series.bars.horizontal, series.bars.lineWidth); - } - - $.plot.plugins.push({ - init: init, - options: options, - name: 'hover', - version: '0.1' - }); -})(jQuery); diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.image.js b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.image.js deleted file mode 100644 index ae98fb4..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.image.js +++ /dev/null @@ -1,249 +0,0 @@ -/* Flot plugin for plotting images. - -Copyright (c) 2007-2014 IOLA and Ole Laursen. -Licensed under the MIT license. - -The data syntax is [ [ image, x1, y1, x2, y2 ], ... ] where (x1, y1) and -(x2, y2) are where you intend the two opposite corners of the image to end up -in the plot. Image must be a fully loaded Javascript image (you can make one -with new Image()). If the image is not complete, it's skipped when plotting. - -There are two helpers included for retrieving images. The easiest work the way -that you put in URLs instead of images in the data, like this: - - [ "myimage.png", 0, 0, 10, 10 ] - -Then call $.plot.image.loadData( data, options, callback ) where data and -options are the same as you pass in to $.plot. This loads the images, replaces -the URLs in the data with the corresponding images and calls "callback" when -all images are loaded (or failed loading). In the callback, you can then call -$.plot with the data set. See the included example. - -A more low-level helper, $.plot.image.load(urls, callback) is also included. -Given a list of URLs, it calls callback with an object mapping from URL to -Image object when all images are loaded or have failed loading. - -The plugin supports these options: - - series: { - images: { - show: boolean - anchor: "corner" or "center" - alpha: [ 0, 1 ] - } - } - -They can be specified for a specific series: - - $.plot( $("#placeholder"), [{ - data: [ ... 
], - images: { ... } - ]) - -Note that because the data format is different from usual data points, you -can't use images with anything else in a specific data series. - -Setting "anchor" to "center" causes the pixels in the image to be anchored at -the corner pixel centers inside of at the pixel corners, effectively letting -half a pixel stick out to each side in the plot. - -A possible future direction could be support for tiling for large images (like -Google Maps). - -*/ - -(function ($) { - var options = { - series: { - images: { - show: false, - alpha: 1, - anchor: "corner" // or "center" - } - } - }; - - $.plot.image = {}; - - $.plot.image.loadDataImages = function (series, options, callback) { - var urls = [], points = []; - - var defaultShow = options.series.images.show; - - $.each(series, function (i, s) { - if (!(defaultShow || s.images.show)) { - return; - } - - if (s.data) { - s = s.data; - } - - $.each(s, function (i, p) { - if (typeof p[0] === "string") { - urls.push(p[0]); - points.push(p); - } - }); - }); - - $.plot.image.load(urls, function (loadedImages) { - $.each(points, function (i, p) { - var url = p[0]; - if (loadedImages[url]) { - p[0] = loadedImages[url]; - } - }); - - callback(); - }); - } - - $.plot.image.load = function (urls, callback) { - var missing = urls.length, loaded = {}; - if (missing === 0) { - callback({}); - } - - $.each(urls, function (i, url) { - var handler = function () { - --missing; - loaded[url] = this; - - if (missing === 0) { - callback(loaded); - } - }; - - $('').load(handler).error(handler).attr('src', url); - }); - }; - - function drawSeries(plot, ctx, series) { - var plotOffset = plot.getPlotOffset(); - - if (!series.images || !series.images.show) { - return; - } - - var points = series.datapoints.points, - ps = series.datapoints.pointsize; - - for (var i = 0; i < points.length; i += ps) { - var img = points[i], - x1 = points[i + 1], y1 = points[i + 2], - x2 = points[i + 3], y2 = points[i + 4], - xaxis = series.xaxis, yaxis = series.yaxis, - tmp; - - // actually we should check img.complete, but it - // appears to be a somewhat unreliable indicator in - // IE6 (false even after load event) - if (!img || img.width <= 0 || img.height <= 0) { - continue; - } - - if (x1 > x2) { - tmp = x2; - x2 = x1; - x1 = tmp; - } - if (y1 > y2) { - tmp = y2; - y2 = y1; - y1 = tmp; - } - - // if the anchor is at the center of the pixel, expand the - // image by 1/2 pixel in each direction - if (series.images.anchor === "center") { - tmp = 0.5 * (x2 - x1) / (img.width - 1); - x1 -= tmp; - x2 += tmp; - tmp = 0.5 * (y2 - y1) / (img.height - 1); - y1 -= tmp; - y2 += tmp; - } - - // clip - if (x1 === x2 || y1 === y2 || - x1 >= xaxis.max || x2 <= xaxis.min || - y1 >= yaxis.max || y2 <= yaxis.min) { - continue; - } - - var sx1 = 0, sy1 = 0, sx2 = img.width, sy2 = img.height; - if (x1 < xaxis.min) { - sx1 += (sx2 - sx1) * (xaxis.min - x1) / (x2 - x1); - x1 = xaxis.min; - } - - if (x2 > xaxis.max) { - sx2 += (sx2 - sx1) * (xaxis.max - x2) / (x2 - x1); - x2 = xaxis.max; - } - - if (y1 < yaxis.min) { - sy2 += (sy1 - sy2) * (yaxis.min - y1) / (y2 - y1); - y1 = yaxis.min; - } - - if (y2 > yaxis.max) { - sy1 += (sy1 - sy2) * (yaxis.max - y2) / (y2 - y1); - y2 = yaxis.max; - } - - x1 = xaxis.p2c(x1); - x2 = xaxis.p2c(x2); - y1 = yaxis.p2c(y1); - y2 = yaxis.p2c(y2); - - // the transformation may have swapped us - if (x1 > x2) { - tmp = x2; - x2 = x1; - x1 = tmp; - } - if (y1 > y2) { - tmp = y2; - y2 = y1; - y1 = tmp; - } - - tmp = ctx.globalAlpha; - ctx.globalAlpha *= 
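An aside on the flow this file implements: the prose above refers to `$.plot.image.loadData`, but the helper actually defined in this source is `$.plot.image.loadDataImages`. A usage sketch (the selector and file name are illustrative):

```js
var data = [{
    data: [["myimage.png", 0, 0, 10, 10]], // [url, x1, y1, x2, y2]
    images: { show: true }
}];
var options = { series: { images: { show: true } } };

$.plot.image.loadDataImages(data, options, function () {
    // the URL strings have been replaced by loaded Image objects
    $.plot($("#placeholder"), data, options);
});
```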
series.images.alpha; - ctx.drawImage(img, - sx1, sy1, sx2 - sx1, sy2 - sy1, - x1 + plotOffset.left, y1 + plotOffset.top, - x2 - x1, y2 - y1); - ctx.globalAlpha = tmp; - } - } - - function processRawData(plot, series, data, datapoints) { - if (!series.images.show) { - return; - } - - // format is Image, x1, y1, x2, y2 (opposite corners) - datapoints.format = [ - { required: true }, - { x: true, number: true, required: true }, - { y: true, number: true, required: true }, - { x: true, number: true, required: true }, - { y: true, number: true, required: true } - ]; - } - - function init(plot) { - plot.hooks.processRawData.push(processRawData); - plot.hooks.drawSeries.push(drawSeries); - } - - $.plot.plugins.push({ - init: init, - options: options, - name: 'image', - version: '1.1' - }); -})(jQuery); diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.js b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.js deleted file mode 100644 index 9469ced..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.js +++ /dev/null @@ -1,2785 +0,0 @@ -/* Javascript plotting library for jQuery, version 1.0.3. - -Copyright (c) 2007-2014 IOLA and Ole Laursen. -Licensed under the MIT license. - -*/ - -// the actual Flot code -(function($) { - "use strict"; - - var Canvas = window.Flot.Canvas; - - function defaultTickGenerator(axis) { - var ticks = [], - start = $.plot.saturated.saturate($.plot.saturated.floorInBase(axis.min, axis.tickSize)), - i = 0, - v = Number.NaN, - prev; - - if (start === -Number.MAX_VALUE) { - ticks.push(start); - start = $.plot.saturated.floorInBase(axis.min + axis.tickSize, axis.tickSize); - } - - do { - prev = v; - //v = start + i * axis.tickSize; - v = $.plot.saturated.multiplyAdd(axis.tickSize, i, start); - ticks.push(v); - ++i; - } while (v < axis.max && v !== prev); - - return ticks; - } - - function defaultTickFormatter(value, axis, precision) { - var oldTickDecimals = axis.tickDecimals, - expPosition = ("" + value).indexOf("e"); - - if (expPosition !== -1) { - return expRepTickFormatter(value, axis, precision); - } - - if (precision > 0) { - axis.tickDecimals = precision; - } - - var factor = axis.tickDecimals ? Math.pow(10, axis.tickDecimals) : 1, - formatted = "" + Math.round(value * factor) / factor; - - // If tickDecimals was specified, ensure that we have exactly that - // much precision; otherwise default to the value's own precision. - if (axis.tickDecimals != null) { - var decimal = formatted.indexOf("."), - decimalPrecision = decimal === -1 ? 0 : formatted.length - decimal - 1; - if (decimalPrecision < axis.tickDecimals) { - var decimals = ("" + factor).substr(1, axis.tickDecimals - decimalPrecision); - formatted = (decimalPrecision ? formatted : formatted + ".") + decimals; - } - } - - axis.tickDecimals = oldTickDecimals; - return formatted; - }; - - function expRepTickFormatter(value, axis, precision) { - var expPosition = ("" + value).indexOf("e"), - exponentValue = parseInt(("" + value).substr(expPosition + 1)), - tenExponent = expPosition !== -1 ? exponentValue : (value > 0 ? 
Math.floor(Math.log(value) / Math.LN10) : 0), - roundWith = Math.pow(10, tenExponent), - x = value / roundWith; - - if (precision) { - var updatedPrecision = recomputePrecision(value, precision); - return (value / roundWith).toFixed(updatedPrecision) + 'e' + tenExponent; - } - - if (axis.tickDecimals > 0) { - return x.toFixed(recomputePrecision(value, axis.tickDecimals)) + 'e' + tenExponent; - } - return x.toFixed() + 'e' + tenExponent; - } - - function recomputePrecision(num, precision) { - //for numbers close to zero, the precision from flot will be a big number - //while for big numbers, the precision will be negative - var log10Value = Math.log(Math.abs(num)) * Math.LOG10E, - newPrecision = Math.abs(log10Value + precision); - - return newPrecision <= 20 ? Math.floor(newPrecision) : 20; - } - - /////////////////////////////////////////////////////////////////////////// - // The top-level container for the entire plot. - function Plot(placeholder, data_, options_, plugins) { - // data is on the form: - // [ series1, series2 ... ] - // where series is either just the data as [ [x1, y1], [x2, y2], ... ] - // or { data: [ [x1, y1], [x2, y2], ... ], label: "some label", ... } - - var series = [], - options = { - // the color theme used for graphs - colors: ["#edc240", "#afd8f8", "#cb4b4b", "#4da74d", "#9440ed"], - xaxis: { - show: null, // null = auto-detect, true = always, false = never - position: "bottom", // or "top" - mode: null, // null or "time" - font: null, // null (derived from CSS in placeholder) or object like { size: 11, lineHeight: 13, style: "italic", weight: "bold", family: "sans-serif", variant: "small-caps" } - color: null, // base color, labels, ticks - tickColor: null, // possibly different color of ticks, e.g. "rgba(0,0,0,0.15)" - transform: null, // null or f: number -> number to transform axis - inverseTransform: null, // if transform is set, this should be the inverse function - min: null, // min. value to show, null means set automatically - max: null, // max. value to show, null means set automatically - autoScaleMargin: null, // margin in % to add if autoScale option is on "loose" mode, - autoScale: "exact", // Available modes: "none", "loose", "exact", "sliding-window" - windowSize: null, // null or number. This is the size of sliding-window. - growOnly: null, // grow only, useful for smoother auto-scale, the scales will grow to accomodate data but won't shrink back. - ticks: null, // either [1, 3] or [[1, "a"], 3] or (fn: axis info -> ticks) or app. number of ticks for auto-ticks - tickFormatter: null, // fn: number -> string - showTickLabels: "major", // "none", "endpoints", "major", "all" - labelWidth: null, // size of tick labels in pixels - labelHeight: null, - reserveSpace: null, // whether to reserve space even if axis isn't shown - tickLength: null, // size in pixels of major tick marks - showMinorTicks: null, // true = show minor tick marks, false = hide minor tick marks - showTicks: null, // true = show tick marks, false = hide all tick marks - gridLines: null, // true = show grid lines, false = hide grid lines - alignTicksWithAxis: null, // axis number or null for no sync - tickDecimals: null, // no. of decimals, null means auto - tickSize: null, // number or [number, "unit"] - minTickSize: null, // number or [number, "unit"] - offset: { below: 0, above: 0 }, // the plot drawing offset. 
this is calculated by the flot.navigate for each axis - boxPosition: { centerX: 0, centerY: 0 } //position of the axis on the corresponding axis box - }, - yaxis: { - autoScaleMargin: 0.02, // margin in % to add if autoScale option is on "loose" mode - autoScale: "loose", // Available modes: "none", "loose", "exact" - growOnly: null, // grow only, useful for smoother auto-scale, the scales will grow to accomodate data but won't shrink back. - position: "left", // or "right" - showTickLabels: "major", // "none", "endpoints", "major", "all" - offset: { below: 0, above: 0 }, // the plot drawing offset. this is calculated by the flot.navigate for each axis - boxPosition: { centerX: 0, centerY: 0 } //position of the axis on the corresponding axis box - }, - xaxes: [], - yaxes: [], - series: { - points: { - show: false, - radius: 3, - lineWidth: 2, // in pixels - fill: true, - fillColor: "#ffffff", - symbol: 'circle' // or callback - }, - lines: { - // we don't put in show: false so we can see - // whether lines were actively disabled - lineWidth: 1, // in pixels - fill: false, - fillColor: null, - steps: false - // Omit 'zero', so we can later default its value to - // match that of the 'fill' option. - }, - bars: { - show: false, - lineWidth: 2, // in pixels - // barWidth: number or [number, absolute] - // when 'absolute' is false, 'number' is relative to the minimum distance between points for the series - // when 'absolute' is true, 'number' is considered to be in units of the x-axis - horizontal: false, - barWidth: 0.8, - fill: true, - fillColor: null, - align: "left", // "left", "right", or "center" - zero: true - }, - shadowSize: 3, - highlightColor: null - }, - grid: { - show: true, - aboveData: false, - color: "#545454", // primary color used for outline and labels - backgroundColor: null, // null for transparent, else color - borderColor: null, // set if different from the grid color - tickColor: null, // color for the ticks, e.g. 
"rgba(0,0,0,0.15)" - margin: 0, // distance from the canvas edge to the grid - labelMargin: 5, // in pixels - axisMargin: 8, // in pixels - borderWidth: 1, // in pixels - minBorderMargin: null, // in pixels, null means taken from points radius - markings: null, // array of ranges or fn: axes -> array of ranges - markingsColor: "#f4f4f4", - markingsLineWidth: 2, - // interactive stuff - clickable: false, - hoverable: false, - autoHighlight: true, // highlight in case mouse is near - mouseActiveRadius: 15 // how far the mouse can be away to activate an item - }, - interaction: { - redrawOverlayInterval: 1000 / 60 // time between updates, -1 means in same flow - }, - hooks: {} - }, - surface = null, // the canvas for the plot itself - overlay = null, // canvas for interactive stuff on top of plot - eventHolder = null, // jQuery object that events should be bound to - ctx = null, - octx = null, - xaxes = [], - yaxes = [], - plotOffset = { - left: 0, - right: 0, - top: 0, - bottom: 0 - }, - plotWidth = 0, - plotHeight = 0, - hooks = { - processOptions: [], - processRawData: [], - processDatapoints: [], - processOffset: [], - setupGrid: [], - adjustSeriesDataRange: [], - setRange: [], - drawBackground: [], - drawSeries: [], - drawAxis: [], - draw: [], - axisReserveSpace: [], - bindEvents: [], - drawOverlay: [], - resize: [], - shutdown: [] - }, - plot = this; - - var eventManager = {}; - - // interactive features - - var redrawTimeout = null; - - // public functions - plot.setData = setData; - plot.setupGrid = setupGrid; - plot.draw = draw; - plot.getPlaceholder = function() { - return placeholder; - }; - plot.getCanvas = function() { - return surface.element; - }; - plot.getSurface = function() { - return surface; - }; - plot.getEventHolder = function() { - return eventHolder[0]; - }; - plot.getPlotOffset = function() { - return plotOffset; - }; - plot.width = function() { - return plotWidth; - }; - plot.height = function() { - return plotHeight; - }; - plot.offset = function() { - var o = eventHolder.offset(); - o.left += plotOffset.left; - o.top += plotOffset.top; - return o; - }; - plot.getData = function() { - return series; - }; - plot.getAxes = function() { - var res = {}; - $.each(xaxes.concat(yaxes), function(_, axis) { - if (axis) { - res[axis.direction + (axis.n !== 1 ? 
axis.n : "") + "axis"] = axis; - } - }); - return res; - }; - plot.getXAxes = function() { - return xaxes; - }; - plot.getYAxes = function() { - return yaxes; - }; - plot.c2p = canvasToCartesianAxisCoords; - plot.p2c = cartesianAxisToCanvasCoords; - plot.getOptions = function() { - return options; - }; - plot.triggerRedrawOverlay = triggerRedrawOverlay; - plot.pointOffset = function(point) { - return { - left: parseInt(xaxes[axisNumber(point, "x") - 1].p2c(+point.x) + plotOffset.left, 10), - top: parseInt(yaxes[axisNumber(point, "y") - 1].p2c(+point.y) + plotOffset.top, 10) - }; - }; - plot.shutdown = shutdown; - plot.destroy = function() { - shutdown(); - placeholder.removeData("plot").empty(); - - series = []; - options = null; - surface = null; - overlay = null; - eventHolder = null; - ctx = null; - octx = null; - xaxes = []; - yaxes = []; - hooks = null; - plot = null; - }; - - plot.resize = function() { - var width = placeholder.width(), - height = placeholder.height(); - surface.resize(width, height); - overlay.resize(width, height); - - executeHooks(hooks.resize, [width, height]); - }; - - plot.clearTextCache = function () { - surface.clearCache(); - overlay.clearCache(); - }; - - plot.autoScaleAxis = autoScaleAxis; - plot.computeRangeForDataSeries = computeRangeForDataSeries; - plot.adjustSeriesDataRange = adjustSeriesDataRange; - plot.findNearbyItem = findNearbyItem; - plot.findNearbyInterpolationPoint = findNearbyInterpolationPoint; - plot.computeValuePrecision = computeValuePrecision; - plot.computeTickSize = computeTickSize; - plot.addEventHandler = addEventHandler; - - // public attributes - plot.hooks = hooks; - - // initialize - var MINOR_TICKS_COUNT_CONSTANT = $.plot.uiConstants.MINOR_TICKS_COUNT_CONSTANT; - var TICK_LENGTH_CONSTANT = $.plot.uiConstants.TICK_LENGTH_CONSTANT; - initPlugins(plot); - setupCanvases(); - parseOptions(options_); - setData(data_); - setupGrid(true); - draw(); - bindEvents(); - - function executeHooks(hook, args) { - args = [plot].concat(args); - for (var i = 0; i < hook.length; ++i) { - hook[i].apply(this, args); - } - } - - function initPlugins() { - // References to key classes, allowing plugins to modify them - - var classes = { - Canvas: Canvas - }; - - for (var i = 0; i < plugins.length; ++i) { - var p = plugins[i]; - p.init(plot, classes); - if (p.options) { - $.extend(true, options, p.options); - } - } - } - - function parseOptions(opts) { - $.extend(true, options, opts); - - // $.extend merges arrays, rather than replacing them. When less - // colors are provided than the size of the default palette, we - // end up with those colors plus the remaining defaults, which is - // not expected behavior; avoid it by replacing them here. 
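A worked sketch of the merge pitfall that comment describes (the palette is the default from this file; the one-color override is hypothetical):

```js
var defaults = { colors: ["#edc240", "#afd8f8", "#cb4b4b", "#4da74d", "#9440ed"] };
var merged = $.extend(true, {}, defaults, { colors: ["#ff0000"] });
// merged.colors is ["#ff0000", "#afd8f8", "#cb4b4b", "#4da74d", "#9440ed"]:
// $.extend only overwrote index 0, which is why the code below replaces
// the whole array whenever opts.colors was provided.
```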
- - if (opts && opts.colors) { - options.colors = opts.colors; - } - - if (options.xaxis.color == null) { - options.xaxis.color = $.color.parse(options.grid.color).scale('a', 0.22).toString(); - } - - if (options.yaxis.color == null) { - options.yaxis.color = $.color.parse(options.grid.color).scale('a', 0.22).toString(); - } - - if (options.xaxis.tickColor == null) { - // grid.tickColor for back-compatibility - options.xaxis.tickColor = options.grid.tickColor || options.xaxis.color; - } - - if (options.yaxis.tickColor == null) { - // grid.tickColor for back-compatibility - options.yaxis.tickColor = options.grid.tickColor || options.yaxis.color; - } - - if (options.grid.borderColor == null) { - options.grid.borderColor = options.grid.color; - } - - if (options.grid.tickColor == null) { - options.grid.tickColor = $.color.parse(options.grid.color).scale('a', 0.22).toString(); - } - - // Fill in defaults for axis options, including any unspecified - // font-spec fields, if a font-spec was provided. - - // If no x/y axis options were provided, create one of each anyway, - // since the rest of the code assumes that they exist. - - var i, axisOptions, axisCount, - fontSize = placeholder.css("font-size"), - fontSizeDefault = fontSize ? +fontSize.replace("px", "") : 13, - fontDefaults = { - style: placeholder.css("font-style"), - size: Math.round(0.8 * fontSizeDefault), - variant: placeholder.css("font-variant"), - weight: placeholder.css("font-weight"), - family: placeholder.css("font-family") - }; - - axisCount = options.xaxes.length || 1; - for (i = 0; i < axisCount; ++i) { - axisOptions = options.xaxes[i]; - if (axisOptions && !axisOptions.tickColor) { - axisOptions.tickColor = axisOptions.color; - } - - axisOptions = $.extend(true, {}, options.xaxis, axisOptions); - options.xaxes[i] = axisOptions; - - if (axisOptions.font) { - axisOptions.font = $.extend({}, fontDefaults, axisOptions.font); - if (!axisOptions.font.color) { - axisOptions.font.color = axisOptions.color; - } - if (!axisOptions.font.lineHeight) { - axisOptions.font.lineHeight = Math.round(axisOptions.font.size * 1.15); - } - } - } - - axisCount = options.yaxes.length || 1; - for (i = 0; i < axisCount; ++i) { - axisOptions = options.yaxes[i]; - if (axisOptions && !axisOptions.tickColor) { - axisOptions.tickColor = axisOptions.color; - } - - axisOptions = $.extend(true, {}, options.yaxis, axisOptions); - options.yaxes[i] = axisOptions; - - if (axisOptions.font) { - axisOptions.font = $.extend({}, fontDefaults, axisOptions.font); - if (!axisOptions.font.color) { - axisOptions.font.color = axisOptions.color; - } - if (!axisOptions.font.lineHeight) { - axisOptions.font.lineHeight = Math.round(axisOptions.font.size * 1.15); - } - } - } - - // save options on axes for future reference - for (i = 0; i < options.xaxes.length; ++i) { - getOrCreateAxis(xaxes, i + 1).options = options.xaxes[i]; - } - - for (i = 0; i < options.yaxes.length; ++i) { - getOrCreateAxis(yaxes, i + 1).options = options.yaxes[i]; - } - - //process boxPosition options used for axis.box size - $.each(allAxes(), function(_, axis) { - axis.boxPosition = axis.options.boxPosition || {centerX: 0, centerY: 0}; - }); - - // add hooks from options - for (var n in hooks) { - if (options.hooks[n] && options.hooks[n].length) { - hooks[n] = hooks[n].concat(options.hooks[n]); - } - } - - executeHooks(hooks.processOptions, [options]); - } - - function setData(d) { - var oldseries = series; - series = parseData(d); - fillInSeriesOptions(); - processData(oldseries); - } - - function 
parseData(d) { - var res = []; - for (var i = 0; i < d.length; ++i) { - var s = $.extend(true, {}, options.series); - - if (d[i].data != null) { - s.data = d[i].data; // move the data instead of deep-copy - delete d[i].data; - - $.extend(true, s, d[i]); - - d[i].data = s.data; - } else { - s.data = d[i]; - } - - res.push(s); - } - - return res; - } - - function axisNumber(obj, coord) { - var a = obj[coord + "axis"]; - if (typeof a === "object") { - // if we got a real axis, extract number - a = a.n; - } - - if (typeof a !== "number") { - a = 1; // default to first axis - } - - return a; - } - - function allAxes() { - // return flat array without annoying null entries - return xaxes.concat(yaxes).filter(function(a) { - return a; - }); - } - - // canvas to axis for cartesian axes - function canvasToCartesianAxisCoords(pos) { - // return an object with x/y corresponding to all used axes - var res = {}, - i, axis; - for (i = 0; i < xaxes.length; ++i) { - axis = xaxes[i]; - if (axis && axis.used) { - res["x" + axis.n] = axis.c2p(pos.left); - } - } - - for (i = 0; i < yaxes.length; ++i) { - axis = yaxes[i]; - if (axis && axis.used) { - res["y" + axis.n] = axis.c2p(pos.top); - } - } - - if (res.x1 !== undefined) { - res.x = res.x1; - } - - if (res.y1 !== undefined) { - res.y = res.y1; - } - - return res; - } - - // axis to canvas for cartesian axes - function cartesianAxisToCanvasCoords(pos) { - // get canvas coords from the first pair of x/y found in pos - var res = {}, - i, axis, key; - - for (i = 0; i < xaxes.length; ++i) { - axis = xaxes[i]; - if (axis && axis.used) { - key = "x" + axis.n; - if (pos[key] == null && axis.n === 1) { - key = "x"; - } - - if (pos[key] != null) { - res.left = axis.p2c(pos[key]); - break; - } - } - } - - for (i = 0; i < yaxes.length; ++i) { - axis = yaxes[i]; - if (axis && axis.used) { - key = "y" + axis.n; - if (pos[key] == null && axis.n === 1) { - key = "y"; - } - - if (pos[key] != null) { - res.top = axis.p2c(pos[key]); - break; - } - } - } - - return res; - } - - function getOrCreateAxis(axes, number) { - if (!axes[number - 1]) { - axes[number - 1] = { - n: number, // save the number for future reference - direction: axes === xaxes ? "x" : "y", - options: $.extend(true, {}, axes === xaxes ? options.xaxis : options.yaxis) - }; - } - - return axes[number - 1]; - } - - function fillInSeriesOptions() { - var neededColors = series.length, - maxIndex = -1, - i; - - // Subtract the number of series that already have fixed colors or - // color indexes from the number that we still need to generate. - - for (i = 0; i < series.length; ++i) { - var sc = series[i].color; - if (sc != null) { - neededColors--; - if (typeof sc === "number" && sc > maxIndex) { - maxIndex = sc; - } - } - } - - // If any of the series have fixed color indexes, then we need to - // generate at least as many colors as the highest index. - - if (neededColors <= maxIndex) { - neededColors = maxIndex + 1; - } - - // Generate all the colors, using first the option colors and then - // variations on those colors once they're exhausted. - - var c, colors = [], - colorPool = options.colors, - colorPoolSize = colorPool.length, - variation = 0, - definedColors = Math.max(0, series.length - neededColors); - - for (i = 0; i < neededColors; i++) { - c = $.color.parse(colorPool[(definedColors + i) % colorPoolSize] || "#666"); - - // Each time we exhaust the colors in the pool we adjust - // a scaling factor used to produce more variations on - // those colors. 
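A sketch of the axis plumbing above (`axisNumber` resolving through `getOrCreateAxis`): a series opts into a secondary axis like this (the data variables are placeholders):

```js
$.plot($("#placeholder"), [
    { data: d1 },           // axisNumber -> 1, the default x/y axes
    { data: d2, yaxis: 2 }  // axisNumber -> 2, created via getOrCreateAxis(yaxes, 2)
], {
    yaxes: [{}, { position: "right" }]
});
```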
The factor alternates negative/positive - // to produce lighter/darker colors. - - // Reset the variation after every few cycles, or else - // it will end up producing only white or black colors. - - if (i % colorPoolSize === 0 && i) { - if (variation >= 0) { - if (variation < 0.5) { - variation = -variation - 0.2; - } else variation = 0; - } else variation = -variation; - } - - colors[i] = c.scale('rgb', 1 + variation); - } - - // Finalize the series options, filling in their colors - - var colori = 0, - s; - for (i = 0; i < series.length; ++i) { - s = series[i]; - - // assign colors - if (s.color == null) { - s.color = colors[colori].toString(); - ++colori; - } else if (typeof s.color === "number") { - s.color = colors[s.color].toString(); - } - - // turn on lines automatically in case nothing is set - if (s.lines.show == null) { - var v, show = true; - for (v in s) { - if (s[v] && s[v].show) { - show = false; - break; - } - } - - if (show) { - s.lines.show = true; - } - } - - // If nothing was provided for lines.zero, default it to match - // lines.fill, since areas by default should extend to zero. - - if (s.lines.zero == null) { - s.lines.zero = !!s.lines.fill; - } - - // setup axes - s.xaxis = getOrCreateAxis(xaxes, axisNumber(s, "x")); - s.yaxis = getOrCreateAxis(yaxes, axisNumber(s, "y")); - } - } - - function processData(prevSeries) { - var topSentry = Number.POSITIVE_INFINITY, - bottomSentry = Number.NEGATIVE_INFINITY, - i, j, k, m, - s, points, ps, val, f, p, - data, format; - - function updateAxis(axis, min, max) { - if (min < axis.datamin && min !== -Infinity) { - axis.datamin = min; - } - - if (max > axis.datamax && max !== Infinity) { - axis.datamax = max; - } - } - - function reusePoints(prevSeries, i) { - if (prevSeries && prevSeries[i] && prevSeries[i].datapoints && prevSeries[i].datapoints.points) { - return prevSeries[i].datapoints.points; - } - - return []; - } - - $.each(allAxes(), function(_, axis) { - // init axis - if (axis.options.growOnly !== true) { - axis.datamin = topSentry; - axis.datamax = bottomSentry; - } else { - if (axis.datamin === undefined) { - axis.datamin = topSentry; - } - if (axis.datamax === undefined) { - axis.datamax = bottomSentry; - } - } - axis.used = false; - }); - - for (i = 0; i < series.length; ++i) { - s = series[i]; - s.datapoints = { - points: [] - }; - - if (s.datapoints.points.length === 0) { - s.datapoints.points = reusePoints(prevSeries, i); - } - - executeHooks(hooks.processRawData, [s, s.data, s.datapoints]); - } - - // first pass: clean and copy data - for (i = 0; i < series.length; ++i) { - s = series[i]; - - data = s.data; - format = s.datapoints.format; - - if (!format) { - format = []; - // find out how to copy - format.push({ - x: true, - y: false, - number: true, - required: true, - computeRange: s.xaxis.options.autoScale !== 'none', - defaultValue: null - }); - - format.push({ - x: false, - y: true, - number: true, - required: true, - computeRange: s.yaxis.options.autoScale !== 'none', - defaultValue: null - }); - - if (s.stack || s.bars.show || (s.lines.show && s.lines.fill)) { - var expectedPs = s.datapoints.pointsize != null ? s.datapoints.pointsize : (s.data && s.data[0] && s.data[0].length ? 
s.data[0].length : 3); - if (expectedPs > 2) { - format.push({ - x: false, - y: true, - number: true, - required: false, - computeRange: s.yaxis.options.autoScale !== 'none', - defaultValue: 0 - }); - } - } - - s.datapoints.format = format; - } - - s.xaxis.used = s.yaxis.used = true; - - if (s.datapoints.pointsize != null) continue; // already filled in - - s.datapoints.pointsize = format.length; - ps = s.datapoints.pointsize; - points = s.datapoints.points; - - var insertSteps = s.lines.show && s.lines.steps; - - for (j = k = 0; j < data.length; ++j, k += ps) { - p = data[j]; - - var nullify = p == null; - if (!nullify) { - for (m = 0; m < ps; ++m) { - val = p[m]; - f = format[m]; - - if (f) { - if (f.number && val != null) { - val = +val; // convert to number - if (isNaN(val)) { - val = null; - } - } - - if (val == null) { - if (f.required) nullify = true; - - if (f.defaultValue != null) val = f.defaultValue; - } - } - - points[k + m] = val; - } - } - - if (nullify) { - for (m = 0; m < ps; ++m) { - val = points[k + m]; - if (val != null) { - f = format[m]; - // extract min/max info - if (f.computeRange) { - if (f.x) { - updateAxis(s.xaxis, val, val); - } - if (f.y) { - updateAxis(s.yaxis, val, val); - } - } - } - points[k + m] = null; - } - } else { - // a little bit of line specific stuff that - // perhaps shouldn't be here, but lacking - // better means... - if (insertSteps && k > 0 && - points[k - ps] != null && - points[k - ps] !== points[k] && - points[k - ps + 1] !== points[k + 1]) { - // copy the point to make room for a middle point - for (m = 0; m < ps; ++m) { - points[k + ps + m] = points[k + m]; - } - - // middle point has same y - points[k + 1] = points[k - ps + 1]; - - // we've added a point, better reflect that - k += ps; - } - } - } - - points.length = k; //trims the internal buffer to the correct length - } - - // give the hooks a chance to run - for (i = 0; i < series.length; ++i) { - s = series[i]; - - executeHooks(hooks.processDatapoints, [s, s.datapoints]); - } - - // second pass: find datamax/datamin for auto-scaling - for (i = 0; i < series.length; ++i) { - s = series[i]; - format = s.datapoints.format; - - if (format.every(function (f) { return !f.computeRange; })) { - continue; - } - - var range = plot.adjustSeriesDataRange(s, - plot.computeRangeForDataSeries(s)); - - executeHooks(hooks.adjustSeriesDataRange, [s, range]); - - updateAxis(s.xaxis, range.xmin, range.xmax); - updateAxis(s.yaxis, range.ymin, range.ymax); - } - - $.each(allAxes(), function(_, axis) { - if (axis.datamin === topSentry) { - axis.datamin = null; - } - - if (axis.datamax === bottomSentry) { - axis.datamax = null; - } - }); - } - - function setupCanvases() { - // Make sure the placeholder is clear of everything except canvases - // from a previous plot in this container that we'll try to re-use. 
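To make the buffer layout produced by `processData` concrete, a sketch (the series values are arbitrary):

```js
var plot = $.plot($("#placeholder"), [[[0, 0], [1, 2], [2, 1]]]);
var dp = plot.getData()[0].datapoints;
// a plain line series gets one x entry and one y entry in its format, so:
dp.pointsize; // 2
dp.points;    // [0, 0, 1, 2, 2, 1], one flat buffer rather than nested pairs
```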
- - placeholder.css("padding", 0) // padding messes up the positioning - .children().filter(function() { - return !$(this).hasClass("flot-overlay") && !$(this).hasClass('flot-base'); - }).remove(); - - if (placeholder.css("position") === 'static') { - placeholder.css("position", "relative"); // for positioning labels and overlay - } - - surface = new Canvas("flot-base", placeholder[0]); - overlay = new Canvas("flot-overlay", placeholder[0]); // overlay canvas for interactive features - - ctx = surface.context; - octx = overlay.context; - - // define which element we're listening for events on - eventHolder = $(overlay.element).unbind(); - - // If we're re-using a plot object, shut down the old one - - var existing = placeholder.data("plot"); - - if (existing) { - existing.shutdown(); - overlay.clear(); - } - - // save in case we get replotted - placeholder.data("plot", plot); - } - - function bindEvents() { - executeHooks(hooks.bindEvents, [eventHolder]); - } - - function addEventHandler(event, handler, eventHolder, priority) { - var key = eventHolder + event; - var eventList = eventManager[key] || []; - - eventList.push({"event": event, "handler": handler, "eventHolder": eventHolder, "priority": priority}); - eventList.sort((a, b) => b.priority - a.priority ); - eventList.forEach( eventData => { - eventData.eventHolder.unbind(eventData.event, eventData.handler); - eventData.eventHolder.bind(eventData.event, eventData.handler); - }); - - eventManager[key] = eventList; - } - - function shutdown() { - if (redrawTimeout) { - clearTimeout(redrawTimeout); - } - - executeHooks(hooks.shutdown, [eventHolder]); - } - - function setTransformationHelpers(axis) { - // set helper functions on the axis, assumes plot area - // has been computed already - - function identity(x) { - return x; - } - - var s, m, t = axis.options.transform || identity, - it = axis.options.inverseTransform; - - // precompute how much the axis is scaling a point - // in canvas space - if (axis.direction === "x") { - if (isFinite(t(axis.max) - t(axis.min))) { - s = axis.scale = plotWidth / Math.abs(t(axis.max) - t(axis.min)); - } else { - s = axis.scale = 1 / Math.abs($.plot.saturated.delta(t(axis.min), t(axis.max), plotWidth)); - } - m = Math.min(t(axis.max), t(axis.min)); - } else { - if (isFinite(t(axis.max) - t(axis.min))) { - s = axis.scale = plotHeight / Math.abs(t(axis.max) - t(axis.min)); - } else { - s = axis.scale = 1 / Math.abs($.plot.saturated.delta(t(axis.min), t(axis.max), plotHeight)); - } - s = -s; - m = Math.max(t(axis.max), t(axis.min)); - } - - // data point to canvas coordinate - if (t === identity) { - // slight optimization - axis.p2c = function(p) { - if (isFinite(p - m)) { - return (p - m) * s; - } else { - return (p / 4 - m / 4) * s * 4; - } - }; - } else { - axis.p2c = function(p) { - var tp = t(p); - - if (isFinite(tp - m)) { - return (tp - m) * s; - } else { - return (tp / 4 - m / 4) * s * 4; - } - }; - } - - // canvas coordinate to data point - if (!it) { - axis.c2p = function(c) { - return m + c / s; - }; - } else { - axis.c2p = function(c) { - return it(m + c / s); - }; - } - } - - function measureTickLabels(axis) { - var opts = axis.options, - ticks = opts.showTickLabels !== 'none' && axis.ticks ? 
axis.ticks : [], - showMajorTickLabels = opts.showTickLabels === 'major' || opts.showTickLabels === 'all', - showEndpointsTickLabels = opts.showTickLabels === 'endpoints' || opts.showTickLabels === 'all', - labelWidth = opts.labelWidth || 0, - labelHeight = opts.labelHeight || 0, - legacyStyles = axis.direction + "Axis " + axis.direction + axis.n + "Axis", - layer = "flot-" + axis.direction + "-axis flot-" + axis.direction + axis.n + "-axis " + legacyStyles, - font = opts.font || "flot-tick-label tickLabel"; - - for (var i = 0; i < ticks.length; ++i) { - var t = ticks[i]; - var label = t.label; - - if (!t.label || - (showMajorTickLabels === false && i > 0 && i < ticks.length - 1) || - (showEndpointsTickLabels === false && (i === 0 || i === ticks.length - 1))) { - continue; - } - - if (typeof t.label === 'object') { - label = t.label.name; - } - - var info = surface.getTextInfo(layer, label, font); - - labelWidth = Math.max(labelWidth, info.width); - labelHeight = Math.max(labelHeight, info.height); - } - - axis.labelWidth = opts.labelWidth || labelWidth; - axis.labelHeight = opts.labelHeight || labelHeight; - } - - function allocateAxisBoxFirstPhase(axis) { - // find the bounding box of the axis by looking at label - // widths/heights and ticks, make room by diminishing the - // plotOffset; this first phase only looks at one - // dimension per axis, the other dimension depends on the - // other axes so will have to wait - - // here reserve additional space - executeHooks(hooks.axisReserveSpace, [axis]); - - var lw = axis.labelWidth, - lh = axis.labelHeight, - pos = axis.options.position, - isXAxis = axis.direction === "x", - tickLength = axis.options.tickLength, - showTicks = axis.options.showTicks, - showMinorTicks = axis.options.showMinorTicks, - gridLines = axis.options.gridLines, - axisMargin = options.grid.axisMargin, - padding = options.grid.labelMargin, - innermost = true, - outermost = true, - found = false; - - // Determine the axis's position in its direction and on its side - - $.each(isXAxis ? xaxes : yaxes, function(i, a) { - if (a && (a.show || a.reserveSpace)) { - if (a === axis) { - found = true; - } else if (a.options.position === pos) { - if (found) { - outermost = false; - } else { - innermost = false; - } - } - } - }); - - // The outermost axis on each side has no margin - if (outermost) { - axisMargin = 0; - } - - // Set the default tickLength if necessary - if (tickLength == null) { - tickLength = TICK_LENGTH_CONSTANT; - } - - // By default, major tick marks are visible - if (showTicks == null) { - showTicks = true; - } - - // By default, minor tick marks are visible - if (showMinorTicks == null) { - showMinorTicks = true; - } - - // By default, grid lines are visible - if (gridLines == null) { - if (innermost) { - gridLines = true; - } else { - gridLines = false; - } - } - - if (!isNaN(+tickLength)) { - padding += showTicks ? 
+tickLength : 0; - } - - if (isXAxis) { - lh += padding; - - if (pos === "bottom") { - plotOffset.bottom += lh + axisMargin; - axis.box = { - top: surface.height - plotOffset.bottom, - height: lh - }; - } else { - axis.box = { - top: plotOffset.top + axisMargin, - height: lh - }; - plotOffset.top += lh + axisMargin; - } - } else { - lw += padding; - - if (pos === "left") { - axis.box = { - left: plotOffset.left + axisMargin, - width: lw - }; - plotOffset.left += lw + axisMargin; - } else { - plotOffset.right += lw + axisMargin; - axis.box = { - left: surface.width - plotOffset.right, - width: lw - }; - } - } - - // save for future reference - axis.position = pos; - axis.tickLength = tickLength; - axis.showMinorTicks = showMinorTicks; - axis.showTicks = showTicks; - axis.gridLines = gridLines; - axis.box.padding = padding; - axis.innermost = innermost; - } - - function allocateAxisBoxSecondPhase(axis) { - // now that all axis boxes have been placed in one - // dimension, we can set the remaining dimension coordinates - if (axis.direction === "x") { - axis.box.left = plotOffset.left - axis.labelWidth / 2; - axis.box.width = surface.width - plotOffset.left - plotOffset.right + axis.labelWidth; - } else { - axis.box.top = plotOffset.top - axis.labelHeight / 2; - axis.box.height = surface.height - plotOffset.bottom - plotOffset.top + axis.labelHeight; - } - } - - function adjustLayoutForThingsStickingOut() { - // possibly adjust plot offset to ensure everything stays - // inside the canvas and isn't clipped off - - var minMargin = options.grid.minBorderMargin, - i; - - // check stuff from the plot (FIXME: this should just read - // a value from the series, otherwise it's impossible to - // customize) - if (minMargin == null) { - minMargin = 0; - for (i = 0; i < series.length; ++i) { - minMargin = Math.max(minMargin, 2 * (series[i].points.radius + series[i].points.lineWidth / 2)); - } - } - - var a, offset = {}, - margins = { - left: minMargin, - right: minMargin, - top: minMargin, - bottom: minMargin - }; - - // check axis labels, note we don't check the actual - // labels but instead use the overall width/height to not - // jump as much around with replots - $.each(allAxes(), function(_, axis) { - if (axis.reserveSpace && axis.ticks && axis.ticks.length) { - if (axis.direction === "x") { - margins.left = Math.max(margins.left, axis.labelWidth / 2); - margins.right = Math.max(margins.right, axis.labelWidth / 2); - } else { - margins.bottom = Math.max(margins.bottom, axis.labelHeight / 2); - margins.top = Math.max(margins.top, axis.labelHeight / 2); - } - } - }); - - for (a in margins) { - offset[a] = margins[a] - plotOffset[a]; - } - $.each(xaxes.concat(yaxes), function(_, axis) { - alignAxisWithGrid(axis, offset, function (offset) { - return offset > 0; - }); - }); - - plotOffset.left = Math.ceil(Math.max(margins.left, plotOffset.left)); - plotOffset.right = Math.ceil(Math.max(margins.right, plotOffset.right)); - plotOffset.top = Math.ceil(Math.max(margins.top, plotOffset.top)); - plotOffset.bottom = Math.ceil(Math.max(margins.bottom, plotOffset.bottom)); - } - - function alignAxisWithGrid(axis, offset, isValid) { - if (axis.direction === "x") { - if (axis.position === "bottom" && isValid(offset.bottom)) { - axis.box.top -= Math.ceil(offset.bottom); - } - if (axis.position === "top" && isValid(offset.top)) { - axis.box.top += Math.ceil(offset.top); - } - } else { - if (axis.position === "left" && isValid(offset.left)) { - axis.box.left += Math.ceil(offset.left); - } - if (axis.position === 
"right" && isValid(offset.right)) { - axis.box.left -= Math.ceil(offset.right); - } - } - } - - function setupGrid(autoScale) { - var i, a, axes = allAxes(), - showGrid = options.grid.show; - - // Initialize the plot's offset from the edge of the canvas - - for (a in plotOffset) { - plotOffset[a] = 0; - } - - executeHooks(hooks.processOffset, [plotOffset]); - - // If the grid is visible, add its border width to the offset - for (a in plotOffset) { - if (typeof (options.grid.borderWidth) === "object") { - plotOffset[a] += showGrid ? options.grid.borderWidth[a] : 0; - } else { - plotOffset[a] += showGrid ? options.grid.borderWidth : 0; - } - } - - $.each(axes, function(_, axis) { - var axisOpts = axis.options; - axis.show = axisOpts.show == null ? axis.used : axisOpts.show; - axis.reserveSpace = axisOpts.reserveSpace == null ? axis.show : axisOpts.reserveSpace; - setupTickFormatter(axis); - executeHooks(hooks.setRange, [axis, autoScale]); - setRange(axis, autoScale); - }); - - if (showGrid) { - plotWidth = surface.width - plotOffset.left - plotOffset.right; - plotHeight = surface.height - plotOffset.bottom - plotOffset.top; - - var allocatedAxes = $.grep(axes, function(axis) { - return axis.show || axis.reserveSpace; - }); - - $.each(allocatedAxes, function(_, axis) { - // make the ticks - setupTickGeneration(axis); - setMajorTicks(axis); - snapRangeToTicks(axis, axis.ticks); - - //for computing the endpoints precision, transformationHelpers are needed - setTransformationHelpers(axis); - setEndpointTicks(axis, series); - - // find labelWidth/Height for axis - measureTickLabels(axis); - }); - - // with all dimensions calculated, we can compute the - // axis bounding boxes, start from the outside - // (reverse order) - for (i = allocatedAxes.length - 1; i >= 0; --i) { - allocateAxisBoxFirstPhase(allocatedAxes[i]); - } - - // make sure we've got enough space for things that - // might stick out - adjustLayoutForThingsStickingOut(); - - $.each(allocatedAxes, function(_, axis) { - allocateAxisBoxSecondPhase(axis); - }); - } - - //adjust axis and plotOffset according to grid.margins - if (options.grid.margin) { - for (a in plotOffset) { - var margin = options.grid.margin || 0; - plotOffset[a] += typeof margin === "number" ? margin : (margin[a] || 0); - } - $.each(xaxes.concat(yaxes), function(_, axis) { - alignAxisWithGrid(axis, options.grid.margin, function(offset) { - return offset !== undefined && offset !== null; - }); - }); - } - - //after adjusting the axis, plot width and height will be modified - plotWidth = surface.width - plotOffset.left - plotOffset.right; - plotHeight = surface.height - plotOffset.bottom - plotOffset.top; - - // now we got the proper plot dimensions, we can compute the scaling - $.each(axes, function(_, axis) { - setTransformationHelpers(axis); - }); - - if (showGrid) { - drawAxisLabels(); - } - - executeHooks(hooks.setupGrid, []); - } - - function widenMinMax(minimum, maximum) { - var min = (minimum === undefined ? null : minimum); - var max = (maximum === undefined ? null : maximum); - var delta = max - min; - if (delta === 0.0) { - // degenerate case - var widen = max === 0 ? 
1 : 0.01; - var wmin = null; - if (min == null) { - wmin -= widen; - } - - // always widen max if we couldn't widen min to ensure we - // don't fall into min == max which doesn't work - if (max == null || min != null) { - max += widen; - } - - if (wmin != null) { - min = wmin; - } - } - - return { - min: min, - max: max - }; - } - - function autoScaleAxis(axis) { - var opts = axis.options, - min = opts.min, - max = opts.max, - datamin = axis.datamin, - datamax = axis.datamax, - delta; - - switch (opts.autoScale) { - case "none": - min = +(opts.min != null ? opts.min : datamin); - max = +(opts.max != null ? opts.max : datamax); - break; - case "loose": - if (datamin != null && datamax != null) { - min = datamin; - max = datamax; - delta = $.plot.saturated.saturate(max - min); - var margin = ((typeof opts.autoScaleMargin === 'number') ? opts.autoScaleMargin : 0.02); - min = $.plot.saturated.saturate(min - delta * margin); - max = $.plot.saturated.saturate(max + delta * margin); - - // make sure we don't go below zero if all values are positive - if (min < 0 && datamin >= 0) { - min = 0; - } - } else { - min = opts.min; - max = opts.max; - } - break; - case "exact": - min = (datamin != null ? datamin : opts.min); - max = (datamax != null ? datamax : opts.max); - break; - case "sliding-window": - if (datamax > max) { - // move the window to fit the new data, - // keeping the axis range constant - max = datamax; - min = Math.max(datamax - (opts.windowSize || 100), min); - } - break; - } - - var widenedMinMax = widenMinMax(min, max); - min = widenedMinMax.min; - max = widenedMinMax.max; - - // grow loose or grow exact supported - if (opts.growOnly === true && opts.autoScale !== "none" && opts.autoScale !== "sliding-window") { - min = (min < datamin) ? min : (datamin !== null ? datamin : min); - max = (max > datamax) ? max : (datamax !== null ? datamax : max); - } - - axis.autoScaledMin = min; - axis.autoScaledMax = max; - } - - function setRange(axis, autoScale) { - var min = typeof axis.options.min === 'number' ? axis.options.min : axis.min, - max = typeof axis.options.max === 'number' ? axis.options.max : axis.max, - plotOffset = axis.options.offset; - - if (autoScale) { - autoScaleAxis(axis); - min = axis.autoScaledMin; - max = axis.autoScaledMax; - } - - min = (min != null ? min : -1) + (plotOffset.below || 0); - max = (max != null ? max : 1) + (plotOffset.above || 0); - - if (min > max) { - var tmp = min; - min = max; - max = tmp; - axis.options.offset = { above: 0, below: 0 }; - } - - axis.min = $.plot.saturated.saturate(min); - axis.max = $.plot.saturated.saturate(max); - } - - function computeValuePrecision (min, max, direction, ticks, tickDecimals) { - var noTicks = fixupNumberOfTicks(direction, surface, ticks); - - var delta = $.plot.saturated.delta(min, max, noTicks), - dec = -Math.floor(Math.log(delta) / Math.LN10); - - //if it is called with tickDecimals, then the precision should not be greather then that - if (tickDecimals && dec > tickDecimals) { - dec = tickDecimals; - } - - var magn = Math.pow(10, -dec), - norm = delta / magn; - - if (norm > 2.25 && norm < 3 && (dec + 1) <= tickDecimals) { - //we need an extra decimals when tickSize is 2.5 - ++dec; - } - - return isFinite(dec) ? 
dec : 0; - }; - - function computeTickSize (min, max, noTicks, tickDecimals) { - var delta = $.plot.saturated.delta(min, max, noTicks), - dec = -Math.floor(Math.log(delta) / Math.LN10); - - //if it is called with tickDecimals, then the precision should not be greather then that - if (tickDecimals && dec > tickDecimals) { - dec = tickDecimals; - } - - var magn = Math.pow(10, -dec), - norm = delta / magn, // norm is between 1.0 and 10.0 - size; - - if (norm < 1.5) { - size = 1; - } else if (norm < 3) { - size = 2; - if (norm > 2.25 && (tickDecimals == null || (dec + 1) <= tickDecimals)) { - size = 2.5; - } - } else if (norm < 7.5) { - size = 5; - } else { - size = 10; - } - - size *= magn; - return size; - } - - function getAxisTickSize(min, max, direction, options, tickDecimals) { - var noTicks; - - if (typeof options.ticks === "number" && options.ticks > 0) { - noTicks = options.ticks; - } else { - // heuristic based on the model a*sqrt(x) fitted to - // some data points that seemed reasonable - noTicks = 0.3 * Math.sqrt(direction === "x" ? surface.width : surface.height); - } - - var size = computeTickSize(min, max, noTicks, tickDecimals); - - if (options.minTickSize != null && size < options.minTickSize) { - size = options.minTickSize; - } - - return options.tickSize || size; - }; - - function fixupNumberOfTicks(direction, surface, ticksOption) { - var noTicks; - - if (typeof ticksOption === "number" && ticksOption > 0) { - noTicks = ticksOption; - } else { - noTicks = 0.3 * Math.sqrt(direction === "x" ? surface.width : surface.height); - } - - return noTicks; - } - - function setupTickFormatter(axis) { - var opts = axis.options; - if (!axis.tickFormatter) { - if (typeof opts.tickFormatter === 'function') { - axis.tickFormatter = function() { - var args = Array.prototype.slice.call(arguments); - return "" + opts.tickFormatter.apply(null, args); - }; - } else { - axis.tickFormatter = defaultTickFormatter; - } - } - } - - function setupTickGeneration(axis) { - var opts = axis.options; - var noTicks; - - noTicks = fixupNumberOfTicks(axis.direction, surface, opts.ticks); - - axis.delta = $.plot.saturated.delta(axis.min, axis.max, noTicks); - var precision = plot.computeValuePrecision(axis.min, axis.max, axis.direction, noTicks, opts.tickDecimals); - - axis.tickDecimals = Math.max(0, opts.tickDecimals != null ? opts.tickDecimals : precision); - axis.tickSize = getAxisTickSize(axis.min, axis.max, axis.direction, opts, opts.tickDecimals); - - // Flot supports base-10 axes; any other mode else is handled by a plug-in, - // like flot.time.js. - - if (!axis.tickGenerator) { - if (typeof opts.tickGenerator === 'function') { - axis.tickGenerator = opts.tickGenerator; - } else { - axis.tickGenerator = defaultTickGenerator; - } - } - - if (opts.alignTicksWithAxis != null) { - var otherAxis = (axis.direction === "x" ? 
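A worked sketch of the `computeTickSize` ladder above: the raw spacing `(max - min) / noTicks` is normalized into `[1, 10)`, snapped to 1, 2, 2.5, 5 or 10, and scaled back:

```js
computeTickSize(0, 100, 10); // delta 10,   norm 1.0 -> tick size 10
computeTickSize(0, 1, 4);    // delta 0.25, norm 2.5 -> tick size 0.25
computeTickSize(0, 7, 10);   // delta 0.7,  norm 7.0 -> tick size 0.5
```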
xaxes : yaxes)[opts.alignTicksWithAxis - 1]; - if (otherAxis && otherAxis.used && otherAxis !== axis) { - // consider snapping min/max to outermost nice ticks - var niceTicks = axis.tickGenerator(axis, plot); - if (niceTicks.length > 0) { - if (opts.min == null) { - axis.min = Math.min(axis.min, niceTicks[0]); - } - - if (opts.max == null && niceTicks.length > 1) { - axis.max = Math.max(axis.max, niceTicks[niceTicks.length - 1]); - } - } - - axis.tickGenerator = function(axis) { - // copy ticks, scaled to this axis - var ticks = [], - v, i; - for (i = 0; i < otherAxis.ticks.length; ++i) { - v = (otherAxis.ticks[i].v - otherAxis.min) / (otherAxis.max - otherAxis.min); - v = axis.min + v * (axis.max - axis.min); - ticks.push(v); - } - return ticks; - }; - - // we might need an extra decimal since forced - // ticks don't necessarily fit naturally - if (!axis.mode && opts.tickDecimals == null) { - var extraDec = Math.max(0, -Math.floor(Math.log(axis.delta) / Math.LN10) + 1), - ts = axis.tickGenerator(axis, plot); - - // only proceed if the tick interval rounded - // with an extra decimal doesn't give us a - // zero at end - if (!(ts.length > 1 && /\..*0$/.test((ts[1] - ts[0]).toFixed(extraDec)))) { - axis.tickDecimals = extraDec; - } - } - } - } - } - - function setMajorTicks(axis) { - var oticks = axis.options.ticks, - ticks = []; - if (oticks == null || (typeof oticks === "number" && oticks > 0)) { - ticks = axis.tickGenerator(axis, plot); - } else if (oticks) { - if ($.isFunction(oticks)) { - // generate the ticks - ticks = oticks(axis); - } else { - ticks = oticks; - } - } - - // clean up/labelify the supplied ticks, copy them over - var i, v; - axis.ticks = []; - for (i = 0; i < ticks.length; ++i) { - var label = null; - var t = ticks[i]; - if (typeof t === "object") { - v = +t[0]; - if (t.length > 1) { - label = t[1]; - } - } else { - v = +t; - } - - if (!isNaN(v)) { - axis.ticks.push( - newTick(v, label, axis, 'major')); - } - } - } - - function newTick(v, label, axis, type) { - if (label === null) { - switch (type) { - case 'min': - case 'max': - //improving the precision of endpoints - var precision = getEndpointPrecision(v, axis); - label = isFinite(precision) ? axis.tickFormatter(v, axis, precision, plot) : axis.tickFormatter(v, axis, precision, plot); - break; - case 'major': - label = axis.tickFormatter(v, axis, undefined, plot); - } - } - return { - v: v, - label: label - }; - } - - function snapRangeToTicks(axis, ticks) { - if (axis.options.autoScale === "loose" && ticks.length > 0) { - // snap to ticks - axis.min = Math.min(axis.min, ticks[0].v); - axis.max = Math.max(axis.max, ticks[ticks.length - 1].v); - } - } - - function getEndpointPrecision(value, axis) { - var canvas1 = Math.floor(axis.p2c(value)), - canvas2 = axis.direction === "x" ? 
canvas1 + 1 : canvas1 - 1, - point1 = axis.c2p(canvas1), - point2 = axis.c2p(canvas2), - precision = computeValuePrecision(point1, point2, axis.direction, 1); - - return precision; - } - - function setEndpointTicks(axis, series) { - if (isValidEndpointTick(axis, series)) { - axis.ticks.unshift(newTick(axis.min, null, axis, 'min')); - axis.ticks.push(newTick(axis.max, null, axis, 'max')); - } - } - - function isValidEndpointTick(axis, series) { - if (axis.options.showTickLabels === 'endpoints') { - return true; - } - if (axis.options.showTickLabels === 'all') { - var associatedSeries = series.filter(function(s) { - return s.xaxis === axis; - }), - notAllBarSeries = associatedSeries.some(function(s) { - return !s.bars.show; - }); - return associatedSeries.length === 0 || notAllBarSeries; - } - if (axis.options.showTickLabels === 'major' || axis.options.showTickLabels === 'none') { - return false; - } - } - - function draw() { - surface.clear(); - executeHooks(hooks.drawBackground, [ctx]); - - var grid = options.grid; - - // draw background, if any - if (grid.show && grid.backgroundColor) { - drawBackground(); - } - - if (grid.show && !grid.aboveData) { - drawGrid(); - } - - for (var i = 0; i < series.length; ++i) { - executeHooks(hooks.drawSeries, [ctx, series[i], i, getColorOrGradient]); - drawSeries(series[i]); - } - - executeHooks(hooks.draw, [ctx]); - - if (grid.show && grid.aboveData) { - drawGrid(); - } - - surface.render(); - - // A draw implies that either the axes or data have changed, so we - // should probably update the overlay highlights as well. - triggerRedrawOverlay(); - } - - function extractRange(ranges, coord) { - var axis, from, to, key, axes = allAxes(); - - for (var i = 0; i < axes.length; ++i) { - axis = axes[i]; - if (axis.direction === coord) { - key = coord + axis.n + "axis"; - if (!ranges[key] && axis.n === 1) { - // support x1axis as xaxis - key = coord + "axis"; - } - - if (ranges[key]) { - from = ranges[key].from; - to = ranges[key].to; - break; - } - } - } - - // backwards-compat stuff - to be removed in future - if (!ranges[key]) { - axis = coord === "x" ? xaxes[0] : yaxes[0]; - from = ranges[coord + "1"]; - to = ranges[coord + "2"]; - } - - // auto-reverse as an added bonus - if (from != null && to != null && from > to) { - var tmp = from; - from = to; - to = tmp; - } - - return { - from: from, - to: to, - axis: axis - }; - } - - function drawBackground() { - ctx.save(); - ctx.translate(plotOffset.left, plotOffset.top); - - ctx.fillStyle = getColorOrGradient(options.grid.backgroundColor, plotHeight, 0, "rgba(255, 255, 255, 0)"); - ctx.fillRect(0, 0, plotWidth, plotHeight); - ctx.restore(); - } - - function drawMarkings() { - // draw markings - var markings = options.grid.markings, - axes; - - if (markings) { - if ($.isFunction(markings)) { - axes = plot.getAxes(); - // xmin etc. 
is backwards compatibility, to be - // removed in the future - axes.xmin = axes.xaxis.min; - axes.xmax = axes.xaxis.max; - axes.ymin = axes.yaxis.min; - axes.ymax = axes.yaxis.max; - - markings = markings(axes); - } - - var i; - for (i = 0; i < markings.length; ++i) { - var m = markings[i], - xrange = extractRange(m, "x"), - yrange = extractRange(m, "y"); - - // fill in missing - if (xrange.from == null) { - xrange.from = xrange.axis.min; - } - - if (xrange.to == null) { - xrange.to = xrange.axis.max; - } - - if (yrange.from == null) { - yrange.from = yrange.axis.min; - } - - if (yrange.to == null) { - yrange.to = yrange.axis.max; - } - - // clip - if (xrange.to < xrange.axis.min || xrange.from > xrange.axis.max || - yrange.to < yrange.axis.min || yrange.from > yrange.axis.max) { - continue; - } - - xrange.from = Math.max(xrange.from, xrange.axis.min); - xrange.to = Math.min(xrange.to, xrange.axis.max); - yrange.from = Math.max(yrange.from, yrange.axis.min); - yrange.to = Math.min(yrange.to, yrange.axis.max); - - var xequal = xrange.from === xrange.to, - yequal = yrange.from === yrange.to; - - if (xequal && yequal) { - continue; - } - - // then draw - xrange.from = Math.floor(xrange.axis.p2c(xrange.from)); - xrange.to = Math.floor(xrange.axis.p2c(xrange.to)); - yrange.from = Math.floor(yrange.axis.p2c(yrange.from)); - yrange.to = Math.floor(yrange.axis.p2c(yrange.to)); - - if (xequal || yequal) { - var lineWidth = m.lineWidth || options.grid.markingsLineWidth, - subPixel = lineWidth % 2 ? 0.5 : 0; - ctx.beginPath(); - ctx.strokeStyle = m.color || options.grid.markingsColor; - ctx.lineWidth = lineWidth; - if (xequal) { - ctx.moveTo(xrange.to + subPixel, yrange.from); - ctx.lineTo(xrange.to + subPixel, yrange.to); - } else { - ctx.moveTo(xrange.from, yrange.to + subPixel); - ctx.lineTo(xrange.to, yrange.to + subPixel); - } - ctx.stroke(); - } else { - ctx.fillStyle = m.color || options.grid.markingsColor; - ctx.fillRect(xrange.from, yrange.to, - xrange.to - xrange.from, - yrange.from - yrange.to); - } - } - } - } - - function findEdges(axis) { - var box = axis.box, - x = 0, - y = 0; - - // find the edges - if (axis.direction === "x") { - x = 0; - y = box.top - plotOffset.top + (axis.position === "top" ? box.height : 0); - } else { - y = 0; - x = box.left - plotOffset.left + (axis.position === "left" ? box.width : 0) + axis.boxPosition.centerX; - } - - return { - x: x, - y: y - }; - }; - - function alignPosition(lineWidth, pos) { - return ((lineWidth % 2) !== 0) ? 
Math.floor(pos) + 0.5 : pos; - }; - - function drawTickBar(axis) { - ctx.lineWidth = 1; - var edges = findEdges(axis), - x = edges.x, - y = edges.y; - - // draw tick bar - if (axis.show) { - var xoff = 0, - yoff = 0; - - ctx.strokeStyle = axis.options.color; - ctx.beginPath(); - if (axis.direction === "x") { - xoff = plotWidth + 1; - } else { - yoff = plotHeight + 1; - } - - if (axis.direction === "x") { - y = alignPosition(ctx.lineWidth, y); - } else { - x = alignPosition(ctx.lineWidth, x); - } - - ctx.moveTo(x, y); - ctx.lineTo(x + xoff, y + yoff); - ctx.stroke(); - } - }; - - function drawTickMarks(axis) { - var t = axis.tickLength, - minorTicks = axis.showMinorTicks, - minorTicksNr = MINOR_TICKS_COUNT_CONSTANT, - edges = findEdges(axis), - x = edges.x, - y = edges.y, - i = 0; - - // draw major tick marks - ctx.strokeStyle = axis.options.color; - ctx.beginPath(); - - for (i = 0; i < axis.ticks.length; ++i) { - var v = axis.ticks[i].v, - xoff = 0, - yoff = 0, - xminor = 0, - yminor = 0, - j; - - if (!isNaN(v) && v >= axis.min && v <= axis.max) { - if (axis.direction === "x") { - x = axis.p2c(v); - yoff = t; - - if (axis.position === "top") { - yoff = -yoff; - } - } else { - y = axis.p2c(v); - xoff = t; - - if (axis.position === "left") { - xoff = -xoff; - } - } - - if (axis.direction === "x") { - x = alignPosition(ctx.lineWidth, x); - } else { - y = alignPosition(ctx.lineWidth, y); - } - - ctx.moveTo(x, y); - ctx.lineTo(x + xoff, y + yoff); - } - - //draw minor tick marks - if (minorTicks === true && i < axis.ticks.length - 1) { - var v1 = axis.ticks[i].v, - v2 = axis.ticks[i + 1].v, - step = (v2 - v1) / (minorTicksNr + 1); - - for (j = 1; j <= minorTicksNr; j++) { - // compute minor tick position - if (axis.direction === "x") { - yminor = t / 2; // minor ticks are half length - x = alignPosition(ctx.lineWidth, axis.p2c(v1 + j * step)) - - if (axis.position === "top") { - yminor = -yminor; - } - - // don't go over the plot borders - if ((x < 0) || (x > plotWidth)) { - continue; - } - } else { - xminor = t / 2; // minor ticks are half length - y = alignPosition(ctx.lineWidth, axis.p2c(v1 + j * step)); - - if (axis.position === "left") { - xminor = -xminor; - } - - // don't go over the plot borders - if ((y < 0) || (y > plotHeight)) { - continue; - } - } - - ctx.moveTo(x, y); - ctx.lineTo(x + xminor, y + yminor); - } - } - } - - ctx.stroke(); - }; - - function drawGridLines(axis) { - // check if the line will be overlapped with a border - var overlappedWithBorder = function (value) { - var bw = options.grid.borderWidth; - return (((typeof bw === "object" && bw[axis.position] > 0) || bw > 0) && (value === axis.min || value === axis.max)); - }; - - ctx.strokeStyle = options.grid.tickColor; - ctx.beginPath(); - var i; - for (i = 0; i < axis.ticks.length; ++i) { - var v = axis.ticks[i].v, - xoff = 0, - yoff = 0, - x = 0, - y = 0; - - if (isNaN(v) || v < axis.min || v > axis.max) continue; - - // skip those lying on the axes if we got a border - if (overlappedWithBorder(v)) continue; - - if (axis.direction === "x") { - x = axis.p2c(v); - y = plotHeight; - yoff = -plotHeight; - } else { - x = 0; - y = axis.p2c(v); - xoff = plotWidth; - } - - if (axis.direction === "x") { - x = alignPosition(ctx.lineWidth, x); - } else { - y = alignPosition(ctx.lineWidth, y); - } - - ctx.moveTo(x, y); - ctx.lineTo(x + xoff, y + yoff); - } - - ctx.stroke(); - }; - - function drawBorder() { - // If either borderWidth or borderColor is an object, then draw the border - // line by line instead of as one rectangle - 
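-        // Canvas strokes are centered on their path, so each edge below is
-        // offset by half its line width to keep the border outside the plot area.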
var bw = options.grid.borderWidth, - bc = options.grid.borderColor; - - if (typeof bw === "object" || typeof bc === "object") { - if (typeof bw !== "object") { - bw = { - top: bw, - right: bw, - bottom: bw, - left: bw - }; - } - if (typeof bc !== "object") { - bc = { - top: bc, - right: bc, - bottom: bc, - left: bc - }; - } - - if (bw.top > 0) { - ctx.strokeStyle = bc.top; - ctx.lineWidth = bw.top; - ctx.beginPath(); - ctx.moveTo(0 - bw.left, 0 - bw.top / 2); - ctx.lineTo(plotWidth, 0 - bw.top / 2); - ctx.stroke(); - } - - if (bw.right > 0) { - ctx.strokeStyle = bc.right; - ctx.lineWidth = bw.right; - ctx.beginPath(); - ctx.moveTo(plotWidth + bw.right / 2, 0 - bw.top); - ctx.lineTo(plotWidth + bw.right / 2, plotHeight); - ctx.stroke(); - } - - if (bw.bottom > 0) { - ctx.strokeStyle = bc.bottom; - ctx.lineWidth = bw.bottom; - ctx.beginPath(); - ctx.moveTo(plotWidth + bw.right, plotHeight + bw.bottom / 2); - ctx.lineTo(0, plotHeight + bw.bottom / 2); - ctx.stroke(); - } - - if (bw.left > 0) { - ctx.strokeStyle = bc.left; - ctx.lineWidth = bw.left; - ctx.beginPath(); - ctx.moveTo(0 - bw.left / 2, plotHeight + bw.bottom); - ctx.lineTo(0 - bw.left / 2, 0); - ctx.stroke(); - } - } else { - ctx.lineWidth = bw; - ctx.strokeStyle = options.grid.borderColor; - ctx.strokeRect(-bw / 2, -bw / 2, plotWidth + bw, plotHeight + bw); - } - }; - - function drawGrid() { - var axes, bw; - - ctx.save(); - ctx.translate(plotOffset.left, plotOffset.top); - - drawMarkings(); - - axes = allAxes(); - bw = options.grid.borderWidth; - - for (var j = 0; j < axes.length; ++j) { - var axis = axes[j]; - - if (!axis.show) { - continue; - } - - drawTickBar(axis); - if (axis.showTicks === true) { - drawTickMarks(axis); - } - - if (axis.gridLines === true) { - drawGridLines(axis, bw); - } - } - - // draw border - if (bw) { - drawBorder(); - } - - ctx.restore(); - } - - function drawAxisLabels() { - $.each(allAxes(), function(_, axis) { - var box = axis.box, - legacyStyles = axis.direction + "Axis " + axis.direction + axis.n + "Axis", - layer = "flot-" + axis.direction + "-axis flot-" + axis.direction + axis.n + "-axis " + legacyStyles, - font = axis.options.font || "flot-tick-label tickLabel", - i, x, y, halign, valign, info, - margin = 3, - nullBox = {x: NaN, y: NaN, width: NaN, height: NaN}, newLabelBox, labelBoxes = [], - overlapping = function(x11, y11, x12, y12, x21, y21, x22, y22) { - return ((x11 <= x21 && x21 <= x12) || (x21 <= x11 && x11 <= x22)) && - ((y11 <= y21 && y21 <= y12) || (y21 <= y11 && y11 <= y22)); - }, - overlapsOtherLabels = function(newLabelBox, previousLabelBoxes) { - return previousLabelBoxes.some(function(labelBox) { - return overlapping( - newLabelBox.x, newLabelBox.y, newLabelBox.x + newLabelBox.width, newLabelBox.y + newLabelBox.height, - labelBox.x, labelBox.y, labelBox.x + labelBox.width, labelBox.y + labelBox.height); - }); - }, - drawAxisLabel = function (tick, labelBoxes) { - if (!tick || !tick.label || tick.v < axis.min || tick.v > axis.max) { - return nullBox; - } - - info = surface.getTextInfo(layer, tick.label, font); - - if (axis.direction === "x") { - halign = "center"; - x = plotOffset.left + axis.p2c(tick.v); - if (axis.position === "bottom") { - y = box.top + box.padding - axis.boxPosition.centerY; - } else { - y = box.top + box.height - box.padding + axis.boxPosition.centerY; - valign = "bottom"; - } - newLabelBox = {x: x - info.width / 2 - margin, y: y - margin, width: info.width + 2 * margin, height: info.height + 2 * margin}; - } else { - valign = "middle"; - y = plotOffset.top 
+ axis.p2c(tick.v); - if (axis.position === "left") { - x = box.left + box.width - box.padding - axis.boxPosition.centerX; - halign = "right"; - } else { - x = box.left + box.padding + axis.boxPosition.centerX; - } - newLabelBox = {x: x - info.width / 2 - margin, y: y - margin, width: info.width + 2 * margin, height: info.height + 2 * margin}; - } - - if (overlapsOtherLabels(newLabelBox, labelBoxes)) { - return nullBox; - } - - surface.addText(layer, x, y, tick.label, font, null, null, halign, valign); - - return newLabelBox; - }; - - // Remove text before checking for axis.show and ticks.length; - // otherwise plugins, like flot-tickrotor, that draw their own - // tick labels will end up with both theirs and the defaults. - - surface.removeText(layer); - - executeHooks(hooks.drawAxis, [axis, surface]); - - if (!axis.show) { - return; - } - - switch (axis.options.showTickLabels) { - case 'none': - break; - case 'endpoints': - labelBoxes.push(drawAxisLabel(axis.ticks[0], labelBoxes)); - labelBoxes.push(drawAxisLabel(axis.ticks[axis.ticks.length - 1], labelBoxes)); - break; - case 'major': - labelBoxes.push(drawAxisLabel(axis.ticks[0], labelBoxes)); - labelBoxes.push(drawAxisLabel(axis.ticks[axis.ticks.length - 1], labelBoxes)); - for (i = 1; i < axis.ticks.length - 1; ++i) { - labelBoxes.push(drawAxisLabel(axis.ticks[i], labelBoxes)); - } - break; - case 'all': - labelBoxes.push(drawAxisLabel(axis.ticks[0], [])); - labelBoxes.push(drawAxisLabel(axis.ticks[axis.ticks.length - 1], labelBoxes)); - for (i = 1; i < axis.ticks.length - 1; ++i) { - labelBoxes.push(drawAxisLabel(axis.ticks[i], labelBoxes)); - } - break; - } - }); - } - - function drawSeries(series) { - if (series.lines.show) { - $.plot.drawSeries.drawSeriesLines(series, ctx, plotOffset, plotWidth, plotHeight, plot.drawSymbol, getColorOrGradient); - } - - if (series.bars.show) { - $.plot.drawSeries.drawSeriesBars(series, ctx, plotOffset, plotWidth, plotHeight, plot.drawSymbol, getColorOrGradient); - } - - if (series.points.show) { - $.plot.drawSeries.drawSeriesPoints(series, ctx, plotOffset, plotWidth, plotHeight, plot.drawSymbol, getColorOrGradient); - } - } - - function computeRangeForDataSeries(series, force, isValid) { - var points = series.datapoints.points, - ps = series.datapoints.pointsize, - format = series.datapoints.format, - topSentry = Number.POSITIVE_INFINITY, - bottomSentry = Number.NEGATIVE_INFINITY, - range = { - xmin: topSentry, - ymin: topSentry, - xmax: bottomSentry, - ymax: bottomSentry - }; - - for (var j = 0; j < points.length; j += ps) { - if (points[j] === null) { - continue; - } - - if (typeof (isValid) === 'function' && !isValid(points[j])) { - continue; - } - - for (var m = 0; m < ps; ++m) { - var val = points[j + m], - f = format[m]; - if (f === null || f === undefined) { - continue; - } - - if (typeof (isValid) === 'function' && !isValid(val)) { - continue; - } - - if ((!force && !f.computeRange) || val === Infinity || val === -Infinity) { - continue; - } - - if (f.x === true) { - if (val < range.xmin) { - range.xmin = val; - } - - if (val > range.xmax) { - range.xmax = val; - } - } - - if (f.y === true) { - if (val < range.ymin) { - range.ymin = val; - } - - if (val > range.ymax) { - range.ymax = val; - } - } - } - } - - return range; - }; - - function adjustSeriesDataRange(series, range) { - if (series.bars.show) { - // make sure we got room for the bar on the dancing floor - var delta; - - // update bar width if needed - var useAbsoluteBarWidth = series.bars.barWidth[1]; - if (series.datapoints && 
series.datapoints.points && !useAbsoluteBarWidth) { - computeBarWidth(series); - } - - var barWidth = series.bars.barWidth[0] || series.bars.barWidth; - switch (series.bars.align) { - case "left": - delta = 0; - break; - case "right": - delta = -barWidth; - break; - default: - delta = -barWidth / 2; - } - - if (series.bars.horizontal) { - range.ymin += delta; - range.ymax += delta + barWidth; - } - else { - range.xmin += delta; - range.xmax += delta + barWidth; - } - } - - if ((series.bars.show && series.bars.zero) || (series.lines.show && series.lines.zero)) { - var ps = series.datapoints.pointsize; - - // make sure the 0 point is included in the computed y range when requested - if (ps <= 2) { - /*if ps > 0 the points were already taken into account for autoScale */ - range.ymin = Math.min(0, range.ymin); - range.ymax = Math.max(0, range.ymax); - } - } - - return range; - }; - - function computeBarWidth(series) { - var pointsize = series.datapoints.pointsize, minDistance = Number.MAX_VALUE, - distance = series.datapoints.points[pointsize] - series.datapoints.points[0] || 1; - - if (isFinite(distance)) { - minDistance = distance; - } - for (var j = pointsize; j < series.datapoints.points.length - pointsize; j += pointsize) { - distance = Math.abs(series.datapoints.points[pointsize + j] - series.datapoints.points[j]); - if (distance < minDistance && isFinite(distance)) { - minDistance = distance; - } - } - - if (typeof series.bars.barWidth === "number") { - series.bars.barWidth = series.bars.barWidth * minDistance; - } else { - series.bars.barWidth[0] = series.bars.barWidth[0] * minDistance; - } - } - - // returns the data item the mouse is over/ the cursor is closest to, or null if none is found - function findNearbyItem(mouseX, mouseY, seriesFilter, radius, computeDistance) { - var i, j, - item = null, - smallestDistance = radius * radius + 1; - - for (var i = series.length - 1; i >= 0; --i) { - if (!seriesFilter(i)) continue; - - var s = series[i]; - if (!s.datapoints) return; - - if (s.lines.show || s.points.show) { - var found = findNearbyPoint(s, mouseX, mouseY, radius, smallestDistance, computeDistance); - if (found) { - smallestDistance = found.distance; - item = [i, found.dataIndex]; - } - } - - if (s.bars.show && !item) { // no other point can be nearby - var foundIndex = findNearbyBar(s, mouseX, mouseY); - if (foundIndex) item = [i, foundIndex]; - } - } - - if (item) { - i = item[0]; - j = item[1]; - var ps = series[i].datapoints.pointsize; - - return { - datapoint: series[i].datapoints.points.slice(j * ps, (j + 1) * ps), - dataIndex: j, - series: series[i], - seriesIndex: i - }; - } - - return null; - } - - function findNearbyPoint (series, mouseX, mouseY, maxDistance, smallestDistance, computeDistance) { - var mx = series.xaxis.c2p(mouseX), - my = series.yaxis.c2p(mouseY), - maxx = maxDistance / series.xaxis.scale, - maxy = maxDistance / series.yaxis.scale, - points = series.datapoints.points, - ps = series.datapoints.pointsize; - - // with inverse transforms, we can't use the maxx/maxy - // optimization, sadly - if (series.xaxis.options.inverseTransform) { - maxx = Number.MAX_VALUE; - } - - if (series.yaxis.options.inverseTransform) { - maxy = Number.MAX_VALUE; - } - - var found = null; - for (var j = 0; j < points.length; j += ps) { - var x = points[j]; - var y = points[j + 1]; - if (x == null) { - continue; - } - - if (x - mx > maxx || x - mx < -maxx || - y - my > maxy || y - my < -maxy) { - continue; - } - - // We have to calculate distances in pixels, not in - // data 
units, because the scales of the axes may be different - var dx = Math.abs(series.xaxis.p2c(x) - mouseX); - var dy = Math.abs(series.yaxis.p2c(y) - mouseY); - var dist = computeDistance ? computeDistance(dx, dy) : dx * dx + dy * dy; - - // use <= to ensure last point takes precedence - // (last generally means on top of) - if (dist < smallestDistance) { - smallestDistance = dist; - found = { dataIndex: j / ps, distance: dist }; - } - } - - return found; - } - - function findNearbyBar (series, mouseX, mouseY) { - var barLeft, barRight, - barWidth = series.bars.barWidth[0] || series.bars.barWidth, - mx = series.xaxis.c2p(mouseX), - my = series.yaxis.c2p(mouseY), - points = series.datapoints.points, - ps = series.datapoints.pointsize; - - switch (series.bars.align) { - case "left": - barLeft = 0; - break; - case "right": - barLeft = -barWidth; - break; - default: - barLeft = -barWidth / 2; - } - - barRight = barLeft + barWidth; - - var fillTowards = series.bars.fillTowards || 0; - var bottom = fillTowards > series.yaxis.min ? Math.min(series.yaxis.max, fillTowards) : series.yaxis.min; - - var foundIndex = null; - for (var j = 0; j < points.length; j += ps) { - var x = points[j], y = points[j + 1]; - if (x == null) - continue; - - // for a bar graph, the cursor must be inside the bar - if (series.bars.horizontal ? - (mx <= Math.max(bottom, x) && mx >= Math.min(bottom, x) && - my >= y + barLeft && my <= y + barRight) : - (mx >= x + barLeft && mx <= x + barRight && - my >= Math.min(bottom, y) && my <= Math.max(bottom, y))) - foundIndex = j / ps; - } - - return foundIndex; - } - - function findNearbyInterpolationPoint(posX, posY, seriesFilter) { - var i, j, dist, dx, dy, ps, - item, - smallestDistance = Number.MAX_VALUE; - - for (i = 0; i < series.length; ++i) { - if (!seriesFilter(i)) { - continue; - } - var points = series[i].datapoints.points; - ps = series[i].datapoints.pointsize; - - // if the data is coming from positive -> negative, reverse the comparison - const comparer = points[points.length - ps] < points[0] - ? function (x1, x2) { return x1 > x2 } - : function (x1, x2) { return x2 > x1 }; - - // do not interpolate outside the bounds of the data. 
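-            // comparer() follows the series' x direction, so this test reads as
-            // "posX comes before the first point" for ascending and descending
-            // data alike.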
- if (comparer(posX, points[0])) { - continue; - } - - // Find the nearest points, x-wise - for (j = ps; j < points.length; j += ps) { - if (comparer(posX, points[j])) { - break; - } - } - - // Now Interpolate - var y, - p1x = points[j - ps], - p1y = points[j - ps + 1], - p2x = points[j], - p2y = points[j + 1]; - - if ((p1x === undefined) || (p2x === undefined) || - (p1y === undefined) || (p2y === undefined)) { - continue; - } - - if (p1x === p2x) { - y = p2y - } else { - y = p1y + (p2y - p1y) * (posX - p1x) / (p2x - p1x); - } - - posY = y; - - dx = Math.abs(series[i].xaxis.p2c(p2x) - posX); - dy = Math.abs(series[i].yaxis.p2c(p2y) - posY); - dist = dx * dx + dy * dy; - - if (dist < smallestDistance) { - smallestDistance = dist; - item = [posX, posY, i, j]; - } - } - - if (item) { - i = item[2]; - j = item[3]; - ps = series[i].datapoints.pointsize; - points = series[i].datapoints.points; - p1x = points[j - ps]; - p1y = points[j - ps + 1]; - p2x = points[j]; - p2y = points[j + 1]; - - return { - datapoint: [item[0], item[1]], - leftPoint: [p1x, p1y], - rightPoint: [p2x, p2y], - seriesIndex: i - }; - } - - return null; - } - - function triggerRedrawOverlay() { - var t = options.interaction.redrawOverlayInterval; - if (t === -1) { // skip event queue - drawOverlay(); - return; - } - - if (!redrawTimeout) { - redrawTimeout = setTimeout(function() { - drawOverlay(plot); - }, t); - } - } - - function drawOverlay(plot) { - redrawTimeout = null; - - if (!octx) { - return; - } - overlay.clear(); - executeHooks(hooks.drawOverlay, [octx, overlay]); - var event = new CustomEvent('onDrawingDone'); - plot.getEventHolder().dispatchEvent(event); - } - - function getColorOrGradient(spec, bottom, top, defaultColor) { - if (typeof spec === "string") { - return spec; - } else { - // assume this is a gradient spec; IE currently only - // supports a simple vertical gradient properly, so that's - // what we support too - var gradient = ctx.createLinearGradient(0, top, 0, bottom); - - for (var i = 0, l = spec.colors.length; i < l; ++i) { - var c = spec.colors[i]; - if (typeof c !== "string") { - var co = $.color.parse(defaultColor); - if (c.brightness != null) { - co = co.scale('rgb', c.brightness); - } - - if (c.opacity != null) { - co.a *= c.opacity; - } - - c = co.toString(); - } - gradient.addColorStop(i / (l - 1), c); - } - - return gradient; - } - } - } - - // Add the plot function to the top level of the jQuery object - - $.plot = function(placeholder, data, options) { - var plot = new Plot($(placeholder), data, options, $.plot.plugins); - return plot; - }; - - $.plot.version = "1.0.3"; - - $.plot.plugins = []; - - // Also add the plot function as a chainable property - $.fn.plot = function(data, options) { - return this.each(function() { - $.plot(this, data, options); - }); - }; - - $.plot.linearTickGenerator = defaultTickGenerator; - $.plot.defaultTickFormatter = defaultTickFormatter; - $.plot.expRepTickFormatter = expRepTickFormatter; -})(jQuery); diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.legend.js b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.legend.js deleted file mode 100644 index 3a6ae15..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.legend.js +++ /dev/null @@ -1,395 +0,0 @@ -/* Flot plugin for drawing legends. 
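-
-The legend is built from inline SVG icons, one per series and matching that
-series' line, bar, point or area style, plus text labels. It is placed either
-in a default container positioned over the plot or in a user-supplied
-container element.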
- -*/ - -(function($) { - var defaultOptions = { - legend: { - show: false, - labelFormatter: null, // fn: string -> string - container: null, // container (as jQuery object) to put legend in, null means default on top of graph - position: 'ne', // position of default legend container within plot - margin: 5, // distance from grid edge to default legend container within plot - sorted: null // default to no legend sorting - } - }; - - function insertLegend(plot, options, placeholder, legendEntries) { - // clear before redraw - if (options.legend.container != null) { - $(options.legend.container).html(''); - } else { - placeholder.find('.legend').remove(); - } - - if (!options.legend.show) { - return; - } - - // Save the legend entries in legend options - var entries = options.legend.legendEntries = legendEntries, - plotOffset = options.legend.plotOffset = plot.getPlotOffset(), - html = [], - entry, labelHtml, iconHtml, - maxLabelLength = 0, - j = 0, - pos = "", - p = options.legend.position, - m = options.legend.margin, - shape = { - name: '', - label: '', - xPos: '', - yPos: '' - }; - - html[j++] = ''; - html[j++] = ''; - html[j++] = svgShapeDefs; - - // Generate html for icons and labels from a list of entries - for (var i = 0; i < entries.length; ++i) { - entry = entries[i]; - iconHtml = ''; - shape.label = entry.label; - shape.xPos = '0em'; - shape.yPos = i * 1.5 + 'em'; - // area - if (entry.options.lines.show && entry.options.lines.fill) { - shape.name = 'area'; - shape.fillColor = entry.color; - iconHtml += getEntryIconHtml(shape); - } - // bars - if (entry.options.bars.show) { - shape.name = 'bar'; - shape.fillColor = entry.color; - iconHtml += getEntryIconHtml(shape); - } - // lines - if (entry.options.lines.show && !entry.options.lines.fill) { - shape.name = 'line'; - shape.strokeColor = entry.color; - shape.strokeWidth = entry.options.lines.lineWidth; - iconHtml += getEntryIconHtml(shape); - } - // points - if (entry.options.points.show) { - shape.name = entry.options.points.symbol; - shape.strokeColor = entry.color; - shape.fillColor = entry.options.points.fillColor; - shape.strokeWidth = entry.options.points.lineWidth; - iconHtml += getEntryIconHtml(shape); - } - - maxLabelLength = maxLabelLength < shape.label.length ? shape.label.length : maxLabelLength; - labelHtml = '' + shape.label + '' - html[j++] = '' + iconHtml + labelHtml + ''; - } - - html[j++] = ''; - if (m[0] == null) { - m = [m, m]; - } - - if (p.charAt(0) === 'n') { - pos += 'top:' + (m[1] + plotOffset.top) + 'px;'; - } else if (p.charAt(0) === 's') { - pos += 'bottom:' + (m[1] + plotOffset.bottom) + 'px;'; - } - - if (p.charAt(1) === 'e') { - pos += 'right:' + (m[0] + plotOffset.right) + 'px;'; - } else if (p.charAt(1) === 'w') { - pos += 'left:' + (m[0] + plotOffset.left) + 'px;'; - } - - var legendEl, - width = 3 + maxLabelLength / 2, - height = entries.length * 1.6; - if (!options.legend.container) { - legendEl = $('
' + html.join('') + '
').appendTo(placeholder); - legendEl.css('width', width + 'em'); - legendEl.css('height', height + 'em'); - legendEl.css('pointerEvents', 'none'); - } else { - legendEl = $(html.join('')).appendTo(options.legend.container)[0]; - options.legend.container.style.width = width + 'em'; - options.legend.container.style.height = height + 'em'; - } - } - - // Generate html for a shape - function getEntryIconHtml(shape) { - var html = '', - name = shape.name, - x = shape.xPos, - y = shape.yPos, - fill = shape.fillColor, - stroke = shape.strokeColor, - width = shape.strokeWidth; - switch (name) { - case 'circle': - html = ''; - break; - case 'diamond': - html = ''; - break; - case 'cross': - html = ''; - break; - case 'rectangle': - html = ''; - break; - case 'plus': - html = ''; - break; - case 'bar': - html = ''; - break; - case 'area': - html = ''; - break; - case 'line': - html = ''; - break; - default: - // default is circle - html = ''; - } - - return html; - } - - // Define svg symbols for shapes - var svgShapeDefs = '' + - '' + - '' + - '' + - '' + - - '' + - '' + - '' + - - '' + - '' + - '' + - - '' + - '' + - '' + - '' + - '' + - '' + - - '' + - '' + - '' + - '' + - '' + - '' + - - '' + - '' + - '' + - '' + - '' + - '' + - - '' + - '' + - '' + - '' + - '' + - '' + - - '' + - '' + - '' + - '' + - '' + - '' + - ''; - - // Generate a list of legend entries in their final order - function getLegendEntries(series, labelFormatter, sorted) { - var lf = labelFormatter, - legendEntries = series.map(function(s, i) { - return { - label: (lf ? lf(s.label, s) : s.label) || 'Plot ' + (i + 1), - color: s.color, - options: { - lines: s.lines, - points: s.points, - bars: s.bars - } - }; - }); - - // Sort the legend using either the default or a custom comparator - if (sorted) { - if ($.isFunction(sorted)) { - legendEntries.sort(sorted); - } else if (sorted === 'reverse') { - legendEntries.reverse(); - } else { - var ascending = (sorted !== 'descending'); - legendEntries.sort(function(a, b) { - return a.label === b.label - ? 0 - : ((a.label < b.label) !== ascending ? 
1 : -1 // Logical XOR - ); - }); - } - } - - return legendEntries; - } - - // return false if opts1 same as opts2 - function checkOptions(opts1, opts2) { - for (var prop in opts1) { - if (opts1.hasOwnProperty(prop)) { - if (opts1[prop] !== opts2[prop]) { - return true; - } - } - } - return false; - } - - // Compare two lists of legend entries - function shouldRedraw(oldEntries, newEntries) { - if (!oldEntries || !newEntries) { - return true; - } - - if (oldEntries.length !== newEntries.length) { - return true; - } - var i, newEntry, oldEntry, newOpts, oldOpts; - for (i = 0; i < newEntries.length; i++) { - newEntry = newEntries[i]; - oldEntry = oldEntries[i]; - - if (newEntry.label !== oldEntry.label) { - return true; - } - - if (newEntry.color !== oldEntry.color) { - return true; - } - - // check for changes in lines options - newOpts = newEntry.options.lines; - oldOpts = oldEntry.options.lines; - if (checkOptions(newOpts, oldOpts)) { - return true; - } - - // check for changes in points options - newOpts = newEntry.options.points; - oldOpts = oldEntry.options.points; - if (checkOptions(newOpts, oldOpts)) { - return true; - } - - // check for changes in bars options - newOpts = newEntry.options.bars; - oldOpts = oldEntry.options.bars; - if (checkOptions(newOpts, oldOpts)) { - return true; - } - } - - return false; - } - - function init(plot) { - plot.hooks.setupGrid.push(function (plot) { - var options = plot.getOptions(); - var series = plot.getData(), - labelFormatter = options.legend.labelFormatter, - oldEntries = options.legend.legendEntries, - oldPlotOffset = options.legend.plotOffset, - newEntries = getLegendEntries(series, labelFormatter, options.legend.sorted), - newPlotOffset = plot.getPlotOffset(); - - if (shouldRedraw(oldEntries, newEntries) || - checkOptions(oldPlotOffset, newPlotOffset)) { - insertLegend(plot, options, plot.getPlaceholder(), newEntries); - } - }); - } - - $.plot.plugins.push({ - init: init, - options: defaultOptions, - name: 'legend', - version: '1.0' - }); -})(jQuery); diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.logaxis.js b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.logaxis.js deleted file mode 100644 index 8622f73..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.logaxis.js +++ /dev/null @@ -1,296 +0,0 @@ -/* Pretty handling of log axes. - -Copyright (c) 2007-2014 IOLA and Ole Laursen. -Copyright (c) 2015 Ciprian Ceteras cipix2000@gmail.com. -Copyright (c) 2017 Raluca Portase -Licensed under the MIT license. - -Set axis.mode to "log" to enable. -*/ - -/* global jQuery*/ - -/** -## jquery.flot.logaxis -This plugin is used to create logarithmic axis. This includes tick generation, -formatters and transformers to and from logarithmic representation. 
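-
-A minimal usage sketch (the placeholder selector and the data are illustrative
-assumptions; a log axis cannot show values less than or equal to 0):
-
-```js
-$.plot($("#placeholder"), [[[1, 0.5], [2, 10], [3, 1000]]], {
-    yaxis: { mode: "log" } // enables log tick generation, formatting and transforms
-});
-```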
- -### Methods and hooks -*/ - -(function ($) { - 'use strict'; - - var options = { - xaxis: {} - }; - - /*tick generators and formatters*/ - var PREFERRED_LOG_TICK_VALUES = computePreferedLogTickValues(Number.MAX_VALUE, 10), - EXTENDED_LOG_TICK_VALUES = computePreferedLogTickValues(Number.MAX_VALUE, 4); - - function computePreferedLogTickValues(endLimit, rangeStep) { - var log10End = Math.floor(Math.log(endLimit) * Math.LOG10E) - 1, - log10Start = -log10End, - val, range, vals = []; - - for (var power = log10Start; power <= log10End; power++) { - range = Math.pow(10, power); - for (var mult = 1; mult < 9; mult += rangeStep) { - val = range * mult; - vals.push(val); - } - } - return vals; - } - - /** - - logTickGenerator(plot, axis, noTicks) - - Generates logarithmic ticks, depending on axis range. - In case the number of ticks that can be generated is less than the expected noTicks/4, - a linear tick generation is used. - */ - var logTickGenerator = function (plot, axis, noTicks) { - var ticks = [], - minIdx = -1, - maxIdx = -1, - surface = plot.getCanvas(), - logTickValues = PREFERRED_LOG_TICK_VALUES, - min = clampAxis(axis, plot), - max = axis.max; - - if (!noTicks) { - noTicks = 0.3 * Math.sqrt(axis.direction === "x" ? surface.width : surface.height); - } - - PREFERRED_LOG_TICK_VALUES.some(function (val, i) { - if (val >= min) { - minIdx = i; - return true; - } else { - return false; - } - }); - - PREFERRED_LOG_TICK_VALUES.some(function (val, i) { - if (val >= max) { - maxIdx = i; - return true; - } else { - return false; - } - }); - - if (maxIdx === -1) { - maxIdx = PREFERRED_LOG_TICK_VALUES.length - 1; - } - - if (maxIdx - minIdx <= noTicks / 4 && logTickValues.length !== EXTENDED_LOG_TICK_VALUES.length) { - //try with multiple of 5 for tick values - logTickValues = EXTENDED_LOG_TICK_VALUES; - minIdx *= 2; - maxIdx *= 2; - } - - var lastDisplayed = null, - inverseNoTicks = 1 / noTicks, - tickValue, pixelCoord, tick; - - // Count the number of tick values would appear, if we can get at least - // nTicks / 4 accept them. - if (maxIdx - minIdx >= noTicks / 4) { - for (var idx = maxIdx; idx >= minIdx; idx--) { - tickValue = logTickValues[idx]; - pixelCoord = (Math.log(tickValue) - Math.log(min)) / (Math.log(max) - Math.log(min)); - tick = tickValue; - - if (lastDisplayed === null) { - lastDisplayed = { - pixelCoord: pixelCoord, - idealPixelCoord: pixelCoord - }; - } else { - if (Math.abs(pixelCoord - lastDisplayed.pixelCoord) >= inverseNoTicks) { - lastDisplayed = { - pixelCoord: pixelCoord, - idealPixelCoord: lastDisplayed.idealPixelCoord - inverseNoTicks - }; - } else { - tick = null; - } - } - - if (tick) { - ticks.push(tick); - } - } - // Since we went in backwards order. - ticks.reverse(); - } else { - var tickSize = plot.computeTickSize(min, max, noTicks), - customAxis = {min: min, max: max, tickSize: tickSize}; - ticks = $.plot.linearTickGenerator(customAxis); - } - - return ticks; - }; - - var clampAxis = function (axis, plot) { - var min = axis.min, - max = axis.max; - - if (min <= 0) { - //for empty graph if axis.min is not strictly positive make it 0.1 - if (axis.datamin === null) { - min = axis.min = 0.1; - } else { - min = processAxisOffset(plot, axis); - } - - if (max < min) { - axis.max = axis.datamax !== null ? axis.datamax : axis.options.max; - axis.options.offset.below = 0; - axis.options.offset.above = 0; - } - } - - return min; - } - - /** - - logTickFormatter(value, axis, precision) - - This is the corresponding tickFormatter of the logaxis. 
- For a number greater that 10^6 or smaller than 10^(-3), this will be drawn - with e representation - */ - var logTickFormatter = function (value, axis, precision) { - var tenExponent = value > 0 ? Math.floor(Math.log(value) / Math.LN10) : 0; - - if (precision) { - if ((tenExponent >= -4) && (tenExponent <= 7)) { - return $.plot.defaultTickFormatter(value, axis, precision); - } else { - return $.plot.expRepTickFormatter(value, axis, precision); - } - } - if ((tenExponent >= -4) && (tenExponent <= 7)) { - //if we have float numbers, return a limited length string(ex: 0.0009 is represented as 0.000900001) - var formattedValue = tenExponent < 0 ? value.toFixed(-tenExponent) : value.toFixed(tenExponent + 2); - if (formattedValue.indexOf('.') !== -1) { - var lastZero = formattedValue.lastIndexOf('0'); - - while (lastZero === formattedValue.length - 1) { - formattedValue = formattedValue.slice(0, -1); - lastZero = formattedValue.lastIndexOf('0'); - } - - //delete the dot if is last - if (formattedValue.indexOf('.') === formattedValue.length - 1) { - formattedValue = formattedValue.slice(0, -1); - } - } - return formattedValue; - } else { - return $.plot.expRepTickFormatter(value, axis); - } - }; - - /*logaxis caracteristic functions*/ - var logTransform = function (v) { - if (v < PREFERRED_LOG_TICK_VALUES[0]) { - v = PREFERRED_LOG_TICK_VALUES[0]; - } - - return Math.log(v); - }; - - var logInverseTransform = function (v) { - return Math.exp(v); - }; - - var invertedTransform = function (v) { - return -v; - } - - var invertedLogTransform = function (v) { - return -logTransform(v); - } - - var invertedLogInverseTransform = function (v) { - return logInverseTransform(-v); - } - - /** - - setDataminRange(plot, axis) - - It is used for clamping the starting point of a logarithmic axis. - This will set the axis datamin range to 0.1 or to the first datapoint greater then 0. - The function is usefull since the logarithmic representation can not show - values less than or equal to 0. - */ - function setDataminRange(plot, axis) { - if (axis.options.mode === 'log' && axis.datamin <= 0) { - if (axis.datamin === null) { - axis.datamin = 0.1; - } else { - axis.datamin = processAxisOffset(plot, axis); - } - } - } - - function processAxisOffset(plot, axis) { - var series = plot.getData(), - range = series - .filter(function(series) { - return series.xaxis === axis || series.yaxis === axis; - }) - .map(function(series) { - return plot.computeRangeForDataSeries(series, null, isValid); - }), - min = axis.direction === 'x' ? Math.min(0.1, range[0].xmin) : Math.min(0.1, range[0].ymin); - - axis.min = min; - - return min; - } - - function isValid(a) { - return a > 0; - } - - function init(plot) { - plot.hooks.processOptions.push(function (plot) { - $.each(plot.getAxes(), function (axisName, axis) { - var opts = axis.options; - if (opts.mode === 'log') { - axis.tickGenerator = function (axis) { - var noTicks = 11; - return logTickGenerator(plot, axis, noTicks); - }; - if (typeof axis.options.tickFormatter !== 'function') { - axis.options.tickFormatter = logTickFormatter; - } - axis.options.transform = opts.inverted ? invertedLogTransform : logTransform; - axis.options.inverseTransform = opts.inverted ? 
invertedLogInverseTransform : logInverseTransform; - axis.options.autoScaleMargin = 0; - plot.hooks.setRange.push(setDataminRange); - } else if (opts.inverted) { - axis.options.transform = invertedTransform; - axis.options.inverseTransform = invertedTransform; - } - }); - }); - } - - $.plot.plugins.push({ - init: init, - options: options, - name: 'log', - version: '0.1' - }); - - $.plot.logTicksGenerator = logTickGenerator; - $.plot.logTickFormatter = logTickFormatter; -})(jQuery); diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.navigate.js b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.navigate.js deleted file mode 100644 index 1c3ce37..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.navigate.js +++ /dev/null @@ -1,781 +0,0 @@ -/* Flot plugin for adding the ability to pan and zoom the plot. - -Copyright (c) 2007-2014 IOLA and Ole Laursen. -Copyright (c) 2016 Ciprian Ceteras. -Copyright (c) 2017 Raluca Portase. -Licensed under the MIT license. - -*/ - -/** -## jquery.flot.navigate.js - -This flot plugin is used for adding the ability to pan and zoom the plot. -A higher level overview is available at [interactions](interactions.md) documentation. - -The default behaviour is scrollwheel up/down to zoom in, drag -to pan. The plugin defines plot.zoom({ center }), plot.zoomOut() and -plot.pan( offset ) so you easily can add custom controls. It also fires -"plotpan" and "plotzoom" events, useful for synchronizing plots. - -The plugin supports these options: -```js - zoom: { - interactive: false, - active: false, - amount: 1.5 // 2 = 200% (zoom in), 0.5 = 50% (zoom out) - } - - pan: { - interactive: false, - active: false, - cursor: "move", // CSS mouse cursor value used when dragging, e.g. "pointer" - frameRate: 60, - mode: "smart" // enable smart pan mode - } - - xaxis: { - axisZoom: true, //zoom axis when mouse over it is allowed - plotZoom: true, //zoom axis is allowed for plot zoom - axisPan: true, //pan axis when mouse over it is allowed - plotPan: true //pan axis is allowed for plot pan - } - - yaxis: { - axisZoom: true, //zoom axis when mouse over it is allowed - plotZoom: true, //zoom axis is allowed for plot zoom - axisPan: true, //pan axis when mouse over it is allowed - plotPan: true //pan axis is allowed for plot pan - } -``` -**interactive** enables the built-in drag/click behaviour. If you enable -interactive for pan, then you'll have a basic plot that supports moving -around; the same for zoom. - -**active** is true after a touch tap on plot. This enables plot navigation. -Once activated, zoom and pan cannot be deactivated. When the plot becomes active, -"plotactivated" event is triggered. - -**amount** specifies the default amount to zoom in (so 1.5 = 150%) relative to -the current viewport. - -**cursor** is a standard CSS mouse cursor string used for visual feedback to the -user when dragging. - -**frameRate** specifies the maximum number of times per second the plot will -update itself while the user is panning around on it (set to null to disable -intermediate pans, the plot will then not update until the mouse button is -released). - -**mode** a string specifies the pan mode for mouse interaction. Accepted values: -'manual': no pan hint or direction snapping; -'smart': The graph shows pan hint bar and the pan movement will snap -to one direction when the drag direction is close to it; -'smartLock'. 
The graph shows pan hint bar and the pan movement will always -snap to a direction that the drag diorection started with. - -Example API usage: -```js - plot = $.plot(...); - - // zoom default amount in on the pixel ( 10, 20 ) - plot.zoom({ center: { left: 10, top: 20 } }); - - // zoom out again - plot.zoomOut({ center: { left: 10, top: 20 } }); - - // zoom 200% in on the pixel (10, 20) - plot.zoom({ amount: 2, center: { left: 10, top: 20 } }); - - // pan 100 pixels to the left (changing x-range in a positive way) and 20 down - plot.pan({ left: -100, top: 20 }) -``` - -Here, "center" specifies where the center of the zooming should happen. Note -that this is defined in pixel space, not the space of the data points (you can -use the p2c helpers on the axes in Flot to help you convert between these). - -**amount** is the amount to zoom the viewport relative to the current range, so -1 is 100% (i.e. no change), 1.5 is 150% (zoom in), 0.7 is 70% (zoom out). You -can set the default in the options. -*/ - -/* eslint-enable */ -(function($) { - 'use strict'; - - var options = { - zoom: { - interactive: false, - active: false, - amount: 1.5 // how much to zoom relative to current position, 2 = 200% (zoom in), 0.5 = 50% (zoom out) - }, - pan: { - interactive: false, - active: false, - cursor: "move", - frameRate: 60, - mode: 'smart' - }, - xaxis: { - axisZoom: true, //zoom axis when mouse over it is allowed - plotZoom: true, //zoom axis is allowed for plot zoom - axisPan: true, //pan axis when mouse over it is allowed - plotPan: true //pan axis is allowed for plot pan - }, - yaxis: { - axisZoom: true, - plotZoom: true, - axisPan: true, - plotPan: true - } - }; - - var saturated = $.plot.saturated; - var browser = $.plot.browser; - var SNAPPING_CONSTANT = $.plot.uiConstants.SNAPPING_CONSTANT; - var PANHINT_LENGTH_CONSTANT = $.plot.uiConstants.PANHINT_LENGTH_CONSTANT; - - function init(plot) { - plot.hooks.processOptions.push(initNevigation); - } - - function initNevigation(plot, options) { - var panAxes = null; - var canDrag = false; - var useManualPan = options.pan.mode === 'manual', - smartPanLock = options.pan.mode === 'smartLock', - useSmartPan = smartPanLock || options.pan.mode === 'smart'; - - function onZoomClick(e, zoomOut, amount) { - var page = browser.getPageXY(e); - - var c = plot.offset(); - c.left = page.X - c.left; - c.top = page.Y - c.top; - - var ec = plot.getPlaceholder().offset(); - ec.left = page.X - ec.left; - ec.top = page.Y - ec.top; - - var axes = plot.getXAxes().concat(plot.getYAxes()).filter(function (axis) { - var box = axis.box; - if (box !== undefined) { - return (ec.left > box.left) && (ec.left < box.left + box.width) && - (ec.top > box.top) && (ec.top < box.top + box.height); - } - }); - - if (axes.length === 0) { - axes = undefined; - } - - if (zoomOut) { - plot.zoomOut({ - center: c, - axes: axes, - amount: amount - }); - } else { - plot.zoom({ - center: c, - axes: axes, - amount: amount - }); - } - } - - var prevCursor = 'default', - panHint = null, - panTimeout = null, - plotState, - prevDragPosition = { x: 0, y: 0 }, - isPanAction = false; - - function onMouseWheel(e, delta) { - var maxAbsoluteDeltaOnMac = 1, - isMacScroll = Math.abs(e.originalEvent.deltaY) <= maxAbsoluteDeltaOnMac, - defaultNonMacScrollAmount = null, - macMagicRatio = 50, - amount = isMacScroll ? 
1 + Math.abs(e.originalEvent.deltaY) / macMagicRatio : defaultNonMacScrollAmount; - - if (isPanAction) { - onDragEnd(e); - } - - if (plot.getOptions().zoom.active) { - e.preventDefault(); - onZoomClick(e, delta < 0, amount); - return false; - } - } - - plot.navigationState = function(startPageX, startPageY) { - var axes = this.getAxes(); - var result = {}; - Object.keys(axes).forEach(function(axisName) { - var axis = axes[axisName]; - result[axisName] = { - navigationOffset: { below: axis.options.offset.below || 0, - above: axis.options.offset.above || 0}, - axisMin: axis.min, - axisMax: axis.max, - diagMode: false - } - }); - - result.startPageX = startPageX || 0; - result.startPageY = startPageY || 0; - return result; - } - - function onMouseDown(e) { - canDrag = true; - } - - function onMouseUp(e) { - canDrag = false; - } - - function isLeftMouseButtonPressed(e) { - return e.button === 0; - } - - function onDragStart(e) { - if (!canDrag || !isLeftMouseButtonPressed(e)) { - return false; - } - - isPanAction = true; - var page = browser.getPageXY(e); - - var ec = plot.getPlaceholder().offset(); - ec.left = page.X - ec.left; - ec.top = page.Y - ec.top; - - panAxes = plot.getXAxes().concat(plot.getYAxes()).filter(function (axis) { - var box = axis.box; - if (box !== undefined) { - return (ec.left > box.left) && (ec.left < box.left + box.width) && - (ec.top > box.top) && (ec.top < box.top + box.height); - } - }); - - if (panAxes.length === 0) { - panAxes = undefined; - } - - var c = plot.getPlaceholder().css('cursor'); - if (c) { - prevCursor = c; - } - - plot.getPlaceholder().css('cursor', plot.getOptions().pan.cursor); - - if (useSmartPan) { - plotState = plot.navigationState(page.X, page.Y); - } else if (useManualPan) { - prevDragPosition.x = page.X; - prevDragPosition.y = page.Y; - } - } - - function onDrag(e) { - var page = browser.getPageXY(e); - var frameRate = plot.getOptions().pan.frameRate; - - if (frameRate === -1) { - if (useSmartPan) { - plot.smartPan({ - x: plotState.startPageX - page.X, - y: plotState.startPageY - page.Y - }, plotState, panAxes, false, smartPanLock); - } else if (useManualPan) { - plot.pan({ - left: prevDragPosition.x - page.X, - top: prevDragPosition.y - page.Y, - axes: panAxes - }); - prevDragPosition.x = page.X; - prevDragPosition.y = page.Y; - } - return; - } - - if (panTimeout || !frameRate) return; - - panTimeout = setTimeout(function() { - if (useSmartPan) { - plot.smartPan({ - x: plotState.startPageX - page.X, - y: plotState.startPageY - page.Y - }, plotState, panAxes, false, smartPanLock); - } else if (useManualPan) { - plot.pan({ - left: prevDragPosition.x - page.X, - top: prevDragPosition.y - page.Y, - axes: panAxes - }); - prevDragPosition.x = page.X; - prevDragPosition.y = page.Y; - } - - panTimeout = null; - }, 1 / frameRate * 1000); - } - - function onDragEnd(e) { - if (panTimeout) { - clearTimeout(panTimeout); - panTimeout = null; - } - - isPanAction = false; - var page = browser.getPageXY(e); - - plot.getPlaceholder().css('cursor', prevCursor); - - if (useSmartPan) { - plot.smartPan({ - x: plotState.startPageX - page.X, - y: plotState.startPageY - page.Y - }, plotState, panAxes, false, smartPanLock); - plot.smartPan.end(); - } else if (useManualPan) { - plot.pan({ - left: prevDragPosition.x - page.X, - top: prevDragPosition.y - page.Y, - axes: panAxes - }); - prevDragPosition.x = 0; - prevDragPosition.y = 0; - } - } - - function onDblClick(e) { - plot.activate(); - - var axes = plot.getTouchedAxis(e.clientX, e.clientY), - event; - if (axes[0]) 
{ - event = new $.Event('re-center', { detail: { - axisTouched: axes[0] - }}); - } else { - event = new $.Event('re-center', {detail: e}); - } - plot.getPlaceholder().trigger(event); - } - - function onClick(e) { - plot.activate(); - - if (isPanAction) { - onDragEnd(e); - } - - return false; - } - - plot.activate = function() { - var o = plot.getOptions(); - if (!o.pan.active || !o.zoom.active) { - o.pan.active = true; - o.zoom.active = true; - plot.getPlaceholder().trigger("plotactivated", [plot]); - } - } - - function bindEvents(plot, eventHolder) { - var o = plot.getOptions(); - if (o.zoom.interactive) { - eventHolder.mousewheel(onMouseWheel); - } - - if (o.pan.interactive) { - plot.addEventHandler("dragstart", onDragStart, eventHolder, 0); - plot.addEventHandler("drag", onDrag, eventHolder, 0); - plot.addEventHandler("dragend", onDragEnd, eventHolder, 0); - eventHolder.bind("mousedown", onMouseDown); - eventHolder.bind("mouseup", onMouseUp); - } - - eventHolder.dblclick(onDblClick); - eventHolder.click(onClick); - } - - plot.zoomOut = function(args) { - if (!args) { - args = {}; - } - - if (!args.amount) { - args.amount = plot.getOptions().zoom.amount; - } - - args.amount = 1 / args.amount; - plot.zoom(args); - }; - - plot.zoom = function(args) { - if (!args) { - args = {}; - } - - var c = args.center, - amount = args.amount || plot.getOptions().zoom.amount, - w = plot.width(), - h = plot.height(), - axes = args.axes || plot.getAxes(); - - if (!c) { - c = { - left: w / 2, - top: h / 2 - }; - } - - var xf = c.left / w, - yf = c.top / h, - minmax = { - x: { - min: c.left - xf * w / amount, - max: c.left + (1 - xf) * w / amount - }, - y: { - min: c.top - yf * h / amount, - max: c.top + (1 - yf) * h / amount - } - }; - - for (var key in axes) { - if (!axes.hasOwnProperty(key)) { - continue; - } - - var axis = axes[key], - opts = axis.options, - min = minmax[axis.direction].min, - max = minmax[axis.direction].max, - navigationOffset = axis.options.offset; - - //skip axis without axisZoom when zooming only on certain axis or axis without plotZoom for zoom on entire plot - if ((!opts.axisZoom && args.axes) || (!args.axes && !opts.plotZoom)) { - continue; - } - - min = $.plot.saturated.saturate(axis.c2p(min)); - max = $.plot.saturated.saturate(axis.c2p(max)); - if (min > max) { - // make sure min < max - var tmp = min; - min = max; - max = tmp; - } - - var offsetBelow = $.plot.saturated.saturate(navigationOffset.below - (axis.min - min)); - var offsetAbove = $.plot.saturated.saturate(navigationOffset.above - (axis.max - max)); - opts.offset = { below: offsetBelow, above: offsetAbove }; - }; - - plot.setupGrid(true); - plot.draw(); - - if (!args.preventEvent) { - plot.getPlaceholder().trigger("plotzoom", [plot, args]); - } - }; - - plot.pan = function(args) { - var delta = { - x: +args.left, - y: +args.top - }; - - if (isNaN(delta.x)) delta.x = 0; - if (isNaN(delta.y)) delta.y = 0; - - $.each(args.axes || plot.getAxes(), function(_, axis) { - var opts = axis.options, - d = delta[axis.direction]; - - //skip axis without axisPan when panning only on certain axis or axis without plotPan for pan the entire plot - if ((!opts.axisPan && args.axes) || (!opts.plotPan && !args.axes)) { - return; - } - - if (d !== 0) { - var navigationOffsetBelow = saturated.saturate(axis.c2p(axis.p2c(axis.min) + d) - axis.c2p(axis.p2c(axis.min))), - navigationOffsetAbove = saturated.saturate(axis.c2p(axis.p2c(axis.max) + d) - axis.c2p(axis.p2c(axis.max))); - - if (!isFinite(navigationOffsetBelow)) { - 
navigationOffsetBelow = 0; - } - - if (!isFinite(navigationOffsetAbove)) { - navigationOffsetAbove = 0; - } - - opts.offset = { - below: saturated.saturate(navigationOffsetBelow + (opts.offset.below || 0)), - above: saturated.saturate(navigationOffsetAbove + (opts.offset.above || 0)) - }; - } - }); - - plot.setupGrid(true); - plot.draw(); - if (!args.preventEvent) { - plot.getPlaceholder().trigger("plotpan", [plot, args]); - } - }; - - plot.recenter = function(args) { - $.each(args.axes || plot.getAxes(), function(_, axis) { - if (args.axes) { - if (this.direction === 'x') { - axis.options.offset = { below: 0 }; - } else if (this.direction === 'y') { - axis.options.offset = { above: 0 }; - } - } else { - axis.options.offset = { below: 0, above: 0 }; - } - }); - plot.setupGrid(true); - plot.draw(); - }; - - var shouldSnap = function(delta) { - return (Math.abs(delta.y) < SNAPPING_CONSTANT && Math.abs(delta.x) >= SNAPPING_CONSTANT) || - (Math.abs(delta.x) < SNAPPING_CONSTANT && Math.abs(delta.y) >= SNAPPING_CONSTANT); - } - - // adjust delta so the pan action is constrained on the vertical or horizontal direction - // it the movements in the other direction are small - var adjustDeltaToSnap = function(delta) { - if (Math.abs(delta.x) < SNAPPING_CONSTANT && Math.abs(delta.y) >= SNAPPING_CONSTANT) { - return {x: 0, y: delta.y}; - } - - if (Math.abs(delta.y) < SNAPPING_CONSTANT && Math.abs(delta.x) >= SNAPPING_CONSTANT) { - return {x: delta.x, y: 0}; - } - - return delta; - } - - var lockedDirection = null; - var lockDeltaDirection = function(delta) { - if (!lockedDirection && Math.max(Math.abs(delta.x), Math.abs(delta.y)) >= SNAPPING_CONSTANT) { - lockedDirection = Math.abs(delta.x) < Math.abs(delta.y) ? 'y' : 'x'; - } - - switch (lockedDirection) { - case 'x': - return { x: delta.x, y: 0 }; - case 'y': - return { x: 0, y: delta.y }; - default: - return { x: 0, y: 0 }; - } - } - - var isDiagonalMode = function(delta) { - if (Math.abs(delta.x) > 0 && Math.abs(delta.y) > 0) { - return true; - } - return false; - } - - var restoreAxisOffset = function(axes, initialState, delta) { - var axis; - Object.keys(axes).forEach(function(axisName) { - axis = axes[axisName]; - if (delta[axis.direction] === 0) { - axis.options.offset.below = initialState[axisName].navigationOffset.below; - axis.options.offset.above = initialState[axisName].navigationOffset.above; - } - }); - } - - var prevDelta = { x: 0, y: 0 }; - plot.smartPan = function(delta, initialState, panAxes, preventEvent, smartLock) { - var snap = smartLock ? true : shouldSnap(delta), - axes = plot.getAxes(), - opts; - delta = smartLock ? 
lockDeltaDirection(delta) : adjustDeltaToSnap(delta); - - if (isDiagonalMode(delta)) { - initialState.diagMode = true; - } - - if (snap && initialState.diagMode === true) { - initialState.diagMode = false; - restoreAxisOffset(axes, initialState, delta); - } - - if (snap) { - panHint = { - start: { - x: initialState.startPageX - plot.offset().left + plot.getPlotOffset().left, - y: initialState.startPageY - plot.offset().top + plot.getPlotOffset().top - }, - end: { - x: initialState.startPageX - delta.x - plot.offset().left + plot.getPlotOffset().left, - y: initialState.startPageY - delta.y - plot.offset().top + plot.getPlotOffset().top - } - } - } else { - panHint = { - start: { - x: initialState.startPageX - plot.offset().left + plot.getPlotOffset().left, - y: initialState.startPageY - plot.offset().top + plot.getPlotOffset().top - }, - end: false - } - } - - if (isNaN(delta.x)) delta.x = 0; - if (isNaN(delta.y)) delta.y = 0; - - if (panAxes) { - axes = panAxes; - } - - var axis, axisMin, axisMax, p, d; - Object.keys(axes).forEach(function(axisName) { - axis = axes[axisName]; - axisMin = axis.min; - axisMax = axis.max; - opts = axis.options; - - d = delta[axis.direction]; - p = prevDelta[axis.direction]; - - //skip axis without axisPan when panning only on certain axis or axis without plotPan for pan the entire plot - if ((!opts.axisPan && panAxes) || (!panAxes && !opts.plotPan)) { - return; - } - - if (d !== 0) { - var navigationOffsetBelow = saturated.saturate(axis.c2p(axis.p2c(axisMin) - (p - d)) - axis.c2p(axis.p2c(axisMin))), - navigationOffsetAbove = saturated.saturate(axis.c2p(axis.p2c(axisMax) - (p - d)) - axis.c2p(axis.p2c(axisMax))); - - if (!isFinite(navigationOffsetBelow)) { - navigationOffsetBelow = 0; - } - - if (!isFinite(navigationOffsetAbove)) { - navigationOffsetAbove = 0; - } - - axis.options.offset.below = saturated.saturate(navigationOffsetBelow + (axis.options.offset.below || 0)); - axis.options.offset.above = saturated.saturate(navigationOffsetAbove + (axis.options.offset.above || 0)); - } - }); - - prevDelta = delta; - plot.setupGrid(true); - plot.draw(); - - if (!preventEvent) { - plot.getPlaceholder().trigger("plotpan", [plot, delta, panAxes, initialState]); - } - }; - - plot.smartPan.end = function() { - panHint = null; - lockedDirection = null; - prevDelta = { x: 0, y: 0 }; - plot.triggerRedrawOverlay(); - } - - function shutdown(plot, eventHolder) { - eventHolder.unbind("mousewheel", onMouseWheel); - eventHolder.unbind("mousedown", onMouseDown); - eventHolder.unbind("mouseup", onMouseUp); - eventHolder.unbind("dragstart", onDragStart); - eventHolder.unbind("drag", onDrag); - eventHolder.unbind("dragend", onDragEnd); - eventHolder.unbind("dblclick", onDblClick); - eventHolder.unbind("click", onClick); - - if (panTimeout) clearTimeout(panTimeout); - } - - function drawOverlay(plot, ctx) { - if (panHint) { - ctx.strokeStyle = 'rgba(96, 160, 208, 0.7)'; - ctx.lineWidth = 2; - ctx.lineJoin = "round"; - var startx = Math.round(panHint.start.x), - starty = Math.round(panHint.start.y), - endx, endy; - - if (panAxes) { - if (panAxes[0].direction === 'x') { - endy = Math.round(panHint.start.y); - endx = Math.round(panHint.end.x); - } else if (panAxes[0].direction === 'y') { - endx = Math.round(panHint.start.x); - endy = Math.round(panHint.end.y); - } - } else { - endx = Math.round(panHint.end.x); - endy = Math.round(panHint.end.y); - } - - ctx.beginPath(); - - if (panHint.end === false) { - ctx.moveTo(startx, starty - PANHINT_LENGTH_CONSTANT); - ctx.lineTo(startx, starty 
+ PANHINT_LENGTH_CONSTANT); - - ctx.moveTo(startx + PANHINT_LENGTH_CONSTANT, starty); - ctx.lineTo(startx - PANHINT_LENGTH_CONSTANT, starty); - } else { - var dirX = starty === endy; - - ctx.moveTo(startx - (dirX ? 0 : PANHINT_LENGTH_CONSTANT), starty - (dirX ? PANHINT_LENGTH_CONSTANT : 0)); - ctx.lineTo(startx + (dirX ? 0 : PANHINT_LENGTH_CONSTANT), starty + (dirX ? PANHINT_LENGTH_CONSTANT : 0)); - - ctx.moveTo(startx, starty); - ctx.lineTo(endx, endy); - - ctx.moveTo(endx - (dirX ? 0 : PANHINT_LENGTH_CONSTANT), endy - (dirX ? PANHINT_LENGTH_CONSTANT : 0)); - ctx.lineTo(endx + (dirX ? 0 : PANHINT_LENGTH_CONSTANT), endy + (dirX ? PANHINT_LENGTH_CONSTANT : 0)); - } - - ctx.stroke(); - } - } - - plot.getTouchedAxis = function(touchPointX, touchPointY) { - var ec = plot.getPlaceholder().offset(); - ec.left = touchPointX - ec.left; - ec.top = touchPointY - ec.top; - - var axis = plot.getXAxes().concat(plot.getYAxes()).filter(function (axis) { - var box = axis.box; - if (box !== undefined) { - return (ec.left > box.left) && (ec.left < box.left + box.width) && - (ec.top > box.top) && (ec.top < box.top + box.height); - } - }); - - return axis; - } - - plot.hooks.drawOverlay.push(drawOverlay); - plot.hooks.bindEvents.push(bindEvents); - plot.hooks.shutdown.push(shutdown); - } - - $.plot.plugins.push({ - init: init, - options: options, - name: 'navigate', - version: '1.3' - }); -})(jQuery); diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.pie.js b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.pie.js deleted file mode 100644 index ec734a1..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.pie.js +++ /dev/null @@ -1,786 +0,0 @@ -/* Flot plugin for rendering pie charts. - -Copyright (c) 2007-2014 IOLA and Ole Laursen. -Licensed under the MIT license. - -The plugin assumes that each series has a single data value, and that each -value is a positive integer or zero. Negative numbers don't make sense for a -pie chart, and have unpredictable results. The values do NOT need to be -passed in as percentages; the plugin will calculate the total and per-slice -percentages internally. 
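- 
- For example, input data of the following (assumed) shape is typical, one
- value per series:
- 
-     [{ label: "Chrome", data: 45 }, { label: "Firefox", data: 30 }]
- 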
- -* Created by Brian Medendorp - -* Updated with contributions from btburnett3, Anthony Aragues and Xavi Ivars - -The plugin supports these options: - - series: { - pie: { - show: true/false - radius: 0-1 for percentage of fullsize, or a specified pixel length, or 'auto' - innerRadius: 0-1 for percentage of fullsize or a specified pixel length, for creating a donut effect - startAngle: 0-2 factor of PI used for starting angle (in radians) i.e. 3/2 starts at the top, 0 and 2 have the same result - tilt: 0-1 for percentage to tilt the pie, where 1 is no tilt, and 0 is completely flat (nothing will show) - offset: { - top: integer value to move the pie up or down - left: integer value to move the pie left or right, or 'auto' - }, - stroke: { - color: any hexadecimal color value (other formats may or may not work, so best to stick with something like '#FFF') - width: integer pixel width of the stroke - }, - label: { - show: true/false, or 'auto' - formatter: a user-defined function that modifies the text/style of the label text - radius: 0-1 for percentage of fullsize, or a specified pixel length - background: { - color: any hexadecimal color value (other formats may or may not work, so best to stick with something like '#000') - opacity: 0-1 - }, - threshold: 0-1 for the percentage value at which to hide labels (if they're too small) - }, - combine: { - threshold: 0-1 for the percentage value at which to combine slices (if they're too small) - color: any hexadecimal color value (other formats may or may not work, so best to stick with something like '#CCC'), if null, the plugin will automatically use the color of the first slice to be combined - label: any text value of what the combined slice should be labeled - }, - highlight: { - opacity: 0-1 - } - } - } - -More detail and specific examples can be found in the included HTML file.
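- 
- For illustration, a minimal sketch wiring these options together (the
- placeholder element and the data values are assumed, not taken from the
- original docs):
- 
-     $.plot($("#placeholder"), [
-         { label: "Series A", data: 60 },
-         { label: "Series B", data: 40 }
-     ], {
-         series: {
-             pie: {
-                 show: true,
-                 radius: 0.9,
-                 label: { show: true }
-             }
-         }
-     });
- 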
- -*/ - -(function($) { - // Maximum redraw attempts when fitting labels within the plot - - var REDRAW_ATTEMPTS = 10; - - // Factor by which to shrink the pie when fitting labels within the plot - - var REDRAW_SHRINK = 0.95; - - function init(plot) { - var canvas = null, - target = null, - options = null, - maxRadius = null, - centerLeft = null, - centerTop = null, - processed = false, - ctx = null; - - // interactive variables - - var highlights = []; - - // add hook to determine if pie plugin in enabled, and then perform necessary operations - - plot.hooks.processOptions.push(function(plot, options) { - if (options.series.pie.show) { - options.grid.show = false; - - // set labels.show - - if (options.series.pie.label.show === "auto") { - if (options.legend.show) { - options.series.pie.label.show = false; - } else { - options.series.pie.label.show = true; - } - } - - // set radius - - if (options.series.pie.radius === "auto") { - if (options.series.pie.label.show) { - options.series.pie.radius = 3 / 4; - } else { - options.series.pie.radius = 1; - } - } - - // ensure sane tilt - - if (options.series.pie.tilt > 1) { - options.series.pie.tilt = 1; - } else if (options.series.pie.tilt < 0) { - options.series.pie.tilt = 0; - } - } - }); - - plot.hooks.bindEvents.push(function(plot, eventHolder) { - var options = plot.getOptions(); - if (options.series.pie.show) { - if (options.grid.hoverable) { - eventHolder.unbind("mousemove").mousemove(onMouseMove); - } - if (options.grid.clickable) { - eventHolder.unbind("click").click(onClick); - } - } - }); - - plot.hooks.processDatapoints.push(function(plot, series, data, datapoints) { - var options = plot.getOptions(); - if (options.series.pie.show) { - processDatapoints(plot, series, data, datapoints); - } - }); - - plot.hooks.drawOverlay.push(function(plot, octx) { - var options = plot.getOptions(); - if (options.series.pie.show) { - drawOverlay(plot, octx); - } - }); - - plot.hooks.draw.push(function(plot, newCtx) { - var options = plot.getOptions(); - if (options.series.pie.show) { - draw(plot, newCtx); - } - }); - - function processDatapoints(plot, series, datapoints) { - if (!processed) { - processed = true; - canvas = plot.getCanvas(); - target = $(canvas).parent(); - options = plot.getOptions(); - plot.setData(combine(plot.getData())); - } - } - - function combine(data) { - var total = 0, - combined = 0, - numCombined = 0, - color = options.series.pie.combine.color, - newdata = [], - i, - value; - - // Fix up the raw data from Flot, ensuring the data is numeric - - for (i = 0; i < data.length; ++i) { - value = data[i].data; - - // If the data is an array, we'll assume that it's a standard - // Flot x-y pair, and are concerned only with the second value. - - // Note how we use the original array, rather than creating a - // new one; this is more efficient and preserves any extra data - // that the user may have stored in higher indexes. 
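- 
-             // For example (values assumed): a bare 7 or a one-element [7]
-             // both normalize to the pair [1, 7]; an existing [x, y] pair
-             // keeps its x and coerces y to a number; anything non-numeric
-             // ends up as [1, 0].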
- - if ($.isArray(value) && value.length === 1) { - value = value[0]; - } - - if ($.isArray(value)) { - // Equivalent to $.isNumeric() but compatible with jQuery < 1.7 - if (!isNaN(parseFloat(value[1])) && isFinite(value[1])) { - value[1] = +value[1]; - } else { - value[1] = 0; - } - } else if (!isNaN(parseFloat(value)) && isFinite(value)) { - value = [1, +value]; - } else { - value = [1, 0]; - } - - data[i].data = [value]; - } - - // Sum up all the slices, so we can calculate percentages for each - - for (i = 0; i < data.length; ++i) { - total += data[i].data[0][1]; - } - - // Count the number of slices with percentages below the combine - // threshold; if it turns out to be just one, we won't combine. - - for (i = 0; i < data.length; ++i) { - value = data[i].data[0][1]; - if (value / total <= options.series.pie.combine.threshold) { - combined += value; - numCombined++; - if (!color) { - color = data[i].color; - } - } - } - - for (i = 0; i < data.length; ++i) { - value = data[i].data[0][1]; - if (numCombined < 2 || value / total > options.series.pie.combine.threshold) { - newdata.push( - $.extend(data[i], { /* extend to allow keeping all other original data values - and using them e.g. in labelFormatter. */ - data: [[1, value]], - color: data[i].color, - label: data[i].label, - angle: value * Math.PI * 2 / total, - percent: value / (total / 100) - }) - ); - } - } - - if (numCombined > 1) { - newdata.push({ - data: [[1, combined]], - color: color, - label: options.series.pie.combine.label, - angle: combined * Math.PI * 2 / total, - percent: combined / (total / 100) - }); - } - - return newdata; - } - - function draw(plot, newCtx) { - if (!target) { - return; // if no series were passed - } - - var canvasWidth = plot.getPlaceholder().width(), - canvasHeight = plot.getPlaceholder().height(), - legendWidth = target.children().filter(".legend").children().width() || 0; - - ctx = newCtx; - - // WARNING: HACK! REWRITE THIS CODE AS SOON AS POSSIBLE! - - // When combining smaller slices into an 'other' slice, we need to - // add a new series. Since Flot gives plugins no way to modify the - // list of series, the pie plugin uses a hack where the first call - // to processDatapoints results in a call to setData with the new - // list of series, then subsequent processDatapoints do nothing. - - // The plugin-global 'processed' flag is used to control this hack; - // it starts out false, and is set to true after the first call to - // processDatapoints. - - // Unfortunately this turns future setData calls into no-ops; they - // call processDatapoints, the flag is true, and nothing happens. - - // To fix this we'll set the flag back to false here in draw, when - // all series have been processed, so the next sequence of calls to - // processDatapoints once again starts out with a slice-combine. - // This is really a hack; in 0.9 we need to give plugins a proper - // way to modify series before any processing begins. 
- - processed = false; - - // calculate maximum radius and center point - maxRadius = Math.min(canvasWidth, canvasHeight / options.series.pie.tilt) / 2; - centerTop = canvasHeight / 2 + options.series.pie.offset.top; - centerLeft = canvasWidth / 2; - - if (options.series.pie.offset.left === "auto") { - if (options.legend.position.match("w")) { - centerLeft += legendWidth / 2; - } else { - centerLeft -= legendWidth / 2; - } - if (centerLeft < maxRadius) { - centerLeft = maxRadius; - } else if (centerLeft > canvasWidth - maxRadius) { - centerLeft = canvasWidth - maxRadius; - } - } else { - centerLeft += options.series.pie.offset.left; - } - - var slices = plot.getData(), - attempts = 0; - - // Keep shrinking the pie's radius until drawPie returns true, - // indicating that all the labels fit, or we try too many times. - do { - if (attempts > 0) { - maxRadius *= REDRAW_SHRINK; - } - attempts += 1; - clear(); - if (options.series.pie.tilt <= 0.8) { - drawShadow(); - } - } while (!drawPie() && attempts < REDRAW_ATTEMPTS) - - if (attempts >= REDRAW_ATTEMPTS) { - clear(); - target.prepend("
<div class='error'>Could not draw pie with labels contained inside canvas</div>
"); - } - - if (plot.setSeries && plot.insertLegend) { - plot.setSeries(slices); - plot.insertLegend(); - } - - // we're actually done at this point, just defining internal functions at this point - function clear() { - ctx.clearRect(0, 0, canvasWidth, canvasHeight); - target.children().filter(".pieLabel, .pieLabelBackground").remove(); - } - - function drawShadow() { - var shadowLeft = options.series.pie.shadow.left; - var shadowTop = options.series.pie.shadow.top; - var edge = 10; - var alpha = options.series.pie.shadow.alpha; - var radius = options.series.pie.radius > 1 ? options.series.pie.radius : maxRadius * options.series.pie.radius; - - if (radius >= canvasWidth / 2 - shadowLeft || radius * options.series.pie.tilt >= canvasHeight / 2 - shadowTop || radius <= edge) { - return; // shadow would be outside canvas, so don't draw it - } - - ctx.save(); - ctx.translate(shadowLeft, shadowTop); - ctx.globalAlpha = alpha; - ctx.fillStyle = "#000"; - - // center and rotate to starting position - ctx.translate(centerLeft, centerTop); - ctx.scale(1, options.series.pie.tilt); - - //radius -= edge; - for (var i = 1; i <= edge; i++) { - ctx.beginPath(); - ctx.arc(0, 0, radius, 0, Math.PI * 2, false); - ctx.fill(); - radius -= i; - } - - ctx.restore(); - } - - function drawPie() { - var startAngle = Math.PI * options.series.pie.startAngle; - var radius = options.series.pie.radius > 1 ? options.series.pie.radius : maxRadius * options.series.pie.radius; - var i; - // center and rotate to starting position - - ctx.save(); - ctx.translate(centerLeft, centerTop); - ctx.scale(1, options.series.pie.tilt); - //ctx.rotate(startAngle); // start at top; -- This doesn't work properly in Opera - - // draw slices - ctx.save(); - - var currentAngle = startAngle; - for (i = 0; i < slices.length; ++i) { - slices[i].startAngle = currentAngle; - drawSlice(slices[i].angle, slices[i].color, true); - } - - ctx.restore(); - - // draw slice outlines - if (options.series.pie.stroke.width > 0) { - ctx.save(); - ctx.lineWidth = options.series.pie.stroke.width; - currentAngle = startAngle; - for (i = 0; i < slices.length; ++i) { - drawSlice(slices[i].angle, options.series.pie.stroke.color, false); - } - - ctx.restore(); - } - - // draw donut hole - drawDonutHole(ctx); - - ctx.restore(); - - // Draw the labels, returning true if they fit within the plot - if (options.series.pie.label.show) { - return drawLabels(); - } else return true; - - function drawSlice(angle, color, fill) { - if (angle <= 0 || isNaN(angle)) { - return; - } - - if (fill) { - ctx.fillStyle = color; - } else { - ctx.strokeStyle = color; - ctx.lineJoin = "round"; - } - - ctx.beginPath(); - if (Math.abs(angle - Math.PI * 2) > 0.000000001) { - ctx.moveTo(0, 0); // Center of the pie - } - - //ctx.arc(0, 0, radius, 0, angle, false); // This doesn't work properly in Opera - ctx.arc(0, 0, radius, currentAngle, currentAngle + angle / 2, false); - ctx.arc(0, 0, radius, currentAngle + angle / 2, currentAngle + angle, false); - ctx.closePath(); - //ctx.rotate(angle); // This doesn't work properly in Opera - currentAngle += angle; - - if (fill) { - ctx.fill(); - } else { - ctx.stroke(); - } - } - - function drawLabels() { - var currentAngle = startAngle; - var radius = options.series.pie.label.radius > 1 ? 
options.series.pie.label.radius : maxRadius * options.series.pie.label.radius; - - for (var i = 0; i < slices.length; ++i) { - if (slices[i].percent >= options.series.pie.label.threshold * 100) { - if (!drawLabel(slices[i], currentAngle, i)) { - return false; - } - } - currentAngle += slices[i].angle; - } - - return true; - - function drawLabel(slice, startAngle, index) { - if (slice.data[0][1] === 0) { - return true; - } - - // format label text - var lf = options.legend.labelFormatter, text, plf = options.series.pie.label.formatter; - - if (lf) { - text = lf(slice.label, slice); - } else { - text = slice.label; - } - - if (plf) { - text = plf(text, slice); - } - - var halfAngle = ((startAngle + slice.angle) + startAngle) / 2; - var x = centerLeft + Math.round(Math.cos(halfAngle) * radius); - var y = centerTop + Math.round(Math.sin(halfAngle) * radius) * options.series.pie.tilt; - - var html = "<span class='pieLabel' id='pieLabel" + index + "' style='position:absolute;top:" + y + "px;left:" + x + "px;'>" + text + "</span>"; - target.append(html); - - var label = target.children("#pieLabel" + index); - var labelTop = (y - label.height() / 2); - var labelLeft = (x - label.width() / 2); - - label.css("top", labelTop); - label.css("left", labelLeft); - - // check to make sure that the label is not outside the canvas - if (0 - labelTop > 0 || 0 - labelLeft > 0 || canvasHeight - (labelTop + label.height()) < 0 || canvasWidth - (labelLeft + label.width()) < 0) { - return false; - } - - if (options.series.pie.label.background.opacity !== 0) { - // put in the transparent background separately to avoid blended labels and label boxes - var c = options.series.pie.label.background.color; - if (c == null) { - c = slice.color; - } - - var pos = "top:" + labelTop + "px;left:" + labelLeft + "px;"; - $("<div class='pieLabelBackground' style='position:absolute;width:" + label.width() + "px;height:" + label.height() + "px;" + pos + "background-color:" + c + ";'></div>
") - .css("opacity", options.series.pie.label.background.opacity) - .insertBefore(label); - } - - return true; - } // end individual label function - } // end drawLabels function - } // end drawPie function - } // end draw function - - // Placed here because it needs to be accessed from multiple locations - - function drawDonutHole(layer) { - if (options.series.pie.innerRadius > 0) { - // subtract the center - layer.save(); - var innerRadius = options.series.pie.innerRadius > 1 ? options.series.pie.innerRadius : maxRadius * options.series.pie.innerRadius; - layer.globalCompositeOperation = "destination-out"; // this does not work with excanvas, but it will fall back to using the stroke color - layer.beginPath(); - layer.fillStyle = options.series.pie.stroke.color; - layer.arc(0, 0, innerRadius, 0, Math.PI * 2, false); - layer.fill(); - layer.closePath(); - layer.restore(); - - // add inner stroke - layer.save(); - layer.beginPath(); - layer.strokeStyle = options.series.pie.stroke.color; - layer.arc(0, 0, innerRadius, 0, Math.PI * 2, false); - layer.stroke(); - layer.closePath(); - layer.restore(); - - // TODO: add extra shadow inside hole (with a mask) if the pie is tilted. - } - } - - //-- Additional Interactive related functions -- - - function isPointInPoly(poly, pt) { - for (var c = false, i = -1, l = poly.length, j = l - 1; ++i < l; j = i) { - ((poly[i][1] <= pt[1] && pt[1] < poly[j][1]) || - (poly[j][1] <= pt[1] && pt[1] < poly[i][1])) && - (pt[0] < (poly[j][0] - poly[i][0]) * (pt[1] - poly[i][1]) / (poly[j][1] - poly[i][1]) + poly[i][0]) && - (c = !c); - } - return c; - } - - function findNearbySlice(mouseX, mouseY) { - var slices = plot.getData(), - options = plot.getOptions(), - radius = options.series.pie.radius > 1 ? options.series.pie.radius : maxRadius * options.series.pie.radius, - x, y; - - for (var i = 0; i < slices.length; ++i) { - var s = slices[i]; - if (s.pie.show) { - ctx.save(); - ctx.beginPath(); - ctx.moveTo(0, 0); // Center of the pie - //ctx.scale(1, options.series.pie.tilt); // this actually seems to break everything when here. - ctx.arc(0, 0, radius, s.startAngle, s.startAngle + s.angle / 2, false); - ctx.arc(0, 0, radius, s.startAngle + s.angle / 2, s.startAngle + s.angle, false); - ctx.closePath(); - x = mouseX - centerLeft; - y = mouseY - centerTop; - - if (ctx.isPointInPath) { - if (ctx.isPointInPath(mouseX - centerLeft, mouseY - centerTop)) { - ctx.restore(); - return { - datapoint: [s.percent, s.data], - dataIndex: 0, - series: s, - seriesIndex: i - }; - } - } else { - // excanvas for IE doesn;t support isPointInPath, this is a workaround. - var p1X = radius * Math.cos(s.startAngle), - p1Y = radius * Math.sin(s.startAngle), - p2X = radius * Math.cos(s.startAngle + s.angle / 4), - p2Y = radius * Math.sin(s.startAngle + s.angle / 4), - p3X = radius * Math.cos(s.startAngle + s.angle / 2), - p3Y = radius * Math.sin(s.startAngle + s.angle / 2), - p4X = radius * Math.cos(s.startAngle + s.angle / 1.5), - p4Y = radius * Math.sin(s.startAngle + s.angle / 1.5), - p5X = radius * Math.cos(s.startAngle + s.angle), - p5Y = radius * Math.sin(s.startAngle + s.angle), - arrPoly = [[0, 0], [p1X, p1Y], [p2X, p2Y], [p3X, p3Y], [p4X, p4Y], [p5X, p5Y]], - arrPoint = [x, y]; - - // TODO: perhaps do some mathmatical trickery here with the Y-coordinate to compensate for pie tilt? 
- - if (isPointInPoly(arrPoly, arrPoint)) { - ctx.restore(); - return { - datapoint: [s.percent, s.data], - dataIndex: 0, - series: s, - seriesIndex: i - }; - } - } - - ctx.restore(); - } - } - - return null; - } - - function onMouseMove(e) { - triggerClickHoverEvent("plothover", e); - } - - function onClick(e) { - triggerClickHoverEvent("plotclick", e); - } - - // trigger click or hover event (they send the same parameters so we share their code) - - function triggerClickHoverEvent(eventname, e) { - var offset = plot.offset(); - var canvasX = parseInt(e.pageX - offset.left); - var canvasY = parseInt(e.pageY - offset.top); - var item = findNearbySlice(canvasX, canvasY); - - if (options.grid.autoHighlight) { - // clear auto-highlights - for (var i = 0; i < highlights.length; ++i) { - var h = highlights[i]; - if (h.auto === eventname && !(item && h.series === item.series)) { - unhighlight(h.series); - } - } - } - - // highlight the slice - - if (item) { - highlight(item.series, eventname); - } - - // trigger any hover bind events - - var pos = { pageX: e.pageX, pageY: e.pageY }; - target.trigger(eventname, [pos, item]); - } - - function highlight(s, auto) { - //if (typeof s == "number") { - // s = series[s]; - //} - - var i = indexOfHighlight(s); - - if (i === -1) { - highlights.push({ series: s, auto: auto }); - plot.triggerRedrawOverlay(); - } else if (!auto) { - highlights[i].auto = false; - } - } - - function unhighlight(s) { - if (s == null) { - highlights = []; - plot.triggerRedrawOverlay(); - } - - //if (typeof s == "number") { - // s = series[s]; - //} - - var i = indexOfHighlight(s); - - if (i !== -1) { - highlights.splice(i, 1); - plot.triggerRedrawOverlay(); - } - } - - function indexOfHighlight(s) { - for (var i = 0; i < highlights.length; ++i) { - var h = highlights[i]; - if (h.series === s) { - return i; - } - } - return -1; - } - - function drawOverlay(plot, octx) { - var options = plot.getOptions(); - var radius = options.series.pie.radius > 1 ? 
options.series.pie.radius : maxRadius * options.series.pie.radius; - - octx.save(); - octx.translate(centerLeft, centerTop); - octx.scale(1, options.series.pie.tilt); - - for (var i = 0; i < highlights.length; ++i) { - drawHighlight(highlights[i].series); - } - - drawDonutHole(octx); - - octx.restore(); - - function drawHighlight(series) { - if (series.angle <= 0 || isNaN(series.angle)) { - return; - } - - //octx.fillStyle = parseColor(options.series.pie.highlight.color).scale(null, null, null, options.series.pie.highlight.opacity).toString(); - octx.fillStyle = "rgba(255, 255, 255, " + options.series.pie.highlight.opacity + ")"; // this is temporary until we have access to parseColor - octx.beginPath(); - if (Math.abs(series.angle - Math.PI * 2) > 0.000000001) { - octx.moveTo(0, 0); // Center of the pie - } - octx.arc(0, 0, radius, series.startAngle, series.startAngle + series.angle / 2, false); - octx.arc(0, 0, radius, series.startAngle + series.angle / 2, series.startAngle + series.angle, false); - octx.closePath(); - octx.fill(); - } - } - } // end init (plugin body) - - // define pie specific options and their default values - var options = { - series: { - pie: { - show: false, - radius: "auto", // actual radius of the visible pie (based on full calculated radius if <=1, or hard pixel value) - innerRadius: 0, /* for donut */ - startAngle: 3 / 2, - tilt: 1, - shadow: { - left: 5, // shadow left offset - top: 15, // shadow top offset - alpha: 0.02 // shadow alpha - }, - offset: { - top: 0, - left: "auto" - }, - stroke: { - color: "#fff", - width: 1 - }, - label: { - show: "auto", - formatter: function(label, slice) { - return "
" + label + "
" + Math.round(slice.percent) + "%
"; - }, // formatter function - radius: 1, // radius at which to place the labels (based on full calculated radius if <=1, or hard pixel value) - background: { - color: null, - opacity: 0 - }, - threshold: 0 // percentage at which to hide the label (i.e. the slice is too narrow) - }, - combine: { - threshold: -1, // percentage at which to combine little slices into one larger slice - color: null, // color to give the new slice (auto-generated if null) - label: "Other" // label to give the new slice - }, - highlight: { - //color: "#fff", // will add this functionality once parseColor is available - opacity: 0.5 - } - } - } - }; - - $.plot.plugins.push({ - init: init, - options: options, - name: "pie", - version: "1.1" - }); -})(jQuery); diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.resize.js b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.resize.js deleted file mode 100644 index 930c68e..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.resize.js +++ /dev/null @@ -1,60 +0,0 @@ -/* eslint-disable */ -/* Flot plugin for automatically redrawing plots as the placeholder resizes. - -Copyright (c) 2007-2014 IOLA and Ole Laursen. -Licensed under the MIT license. - -It works by listening for changes on the placeholder div (through the jQuery -resize event plugin) - if the size changes, it will redraw the plot. - -There are no options. If you need to disable the plugin for some plots, you -can just fix the size of their placeholders. - -*/ - -/* Inline dependency: - * jQuery resize event - v1.1 - 3/14/2010 - * http://benalman.com/projects/jquery-resize-plugin/ - * - * Copyright (c) 2010 "Cowboy" Ben Alman - * Dual licensed under the MIT and GPL licenses. 
- * http://benalman.com/about/license/ - */ -(function($,e,t){"$:nomunge";var i=[],n=$.resize=$.extend($.resize,{}),a,r=false,s="setTimeout",u="resize",m=u+"-special-event",o="pendingDelay",l="activeDelay",f="throttleWindow";n[o]=200;n[l]=20;n[f]=true;$.event.special[u]={setup:function(){if(!n[f]&&this[s]){return false}var e=$(this);i.push(this);e.data(m,{w:e.width(),h:e.height()});if(i.length===1){a=t;h()}},teardown:function(){if(!n[f]&&this[s]){return false}var e=$(this);for(var t=i.length-1;t>=0;t--){if(i[t]==this){i.splice(t,1);break}}e.removeData(m);if(!i.length){if(r){cancelAnimationFrame(a)}else{clearTimeout(a)}a=null}},add:function(e){if(!n[f]&&this[s]){return false}var i;function a(e,n,a){var r=$(this),s=r.data(m)||{};s.w=n!==t?n:r.width();s.h=a!==t?a:r.height();i.apply(this,arguments)}if($.isFunction(e)){i=e;return a}else{i=e.handler;e.handler=a}}};function h(t){if(r===true){r=t||1}for(var s=i.length-1;s>=0;s--){var l=$(i[s]);if(l[0]==e||l.is(":visible")){var f=l.width(),c=l.height(),d=l.data(m);if(d&&(f!==d.w||c!==d.h)){l.trigger(u,[d.w=f,d.h=c]);r=t||true}}else{d=l.data(m);d.w=0;d.h=0}}if(a!==null){if(r&&(t==null||t-r<1e3)){a=e.requestAnimationFrame(h)}else{a=setTimeout(h,n[o]);r=false}}}if(!e.requestAnimationFrame){e.requestAnimationFrame=function(){return e.webkitRequestAnimationFrame||e.mozRequestAnimationFrame||e.oRequestAnimationFrame||e.msRequestAnimationFrame||function(t,i){return e.setTimeout(function(){t((new Date).getTime())},n[l])}}()}if(!e.cancelAnimationFrame){e.cancelAnimationFrame=function(){return e.webkitCancelRequestAnimationFrame||e.mozCancelRequestAnimationFrame||e.oCancelRequestAnimationFrame||e.msCancelRequestAnimationFrame||clearTimeout}()}})(jQuery,this); - -/* eslint-enable */ -(function ($) { - var options = { }; // no options - - function init(plot) { - function onResize() { - var placeholder = plot.getPlaceholder(); - - // somebody might have hidden us and we can't plot - // when we don't have the dimensions - if (placeholder.width() === 0 || placeholder.height() === 0) return; - - plot.resize(); - plot.setupGrid(); - plot.draw(); - } - - function bindEvents(plot, eventHolder) { - plot.getPlaceholder().resize(onResize); - } - - function shutdown(plot, eventHolder) { - plot.getPlaceholder().unbind("resize", onResize); - } - - plot.hooks.bindEvents.push(bindEvents); - plot.hooks.shutdown.push(shutdown); - } - - $.plot.plugins.push({ - init: init, - options: options, - name: 'resize', - version: '1.0' - }); -})(jQuery); diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.saturated.js b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.saturated.js deleted file mode 100644 index 34b9c50..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.saturated.js +++ /dev/null @@ -1,43 +0,0 @@ -(function ($) { - 'use strict'; - var saturated = { - saturate: function (a) { - if (a === Infinity) { - return Number.MAX_VALUE; - } - - if (a === -Infinity) { - return -Number.MAX_VALUE; - } - - return a; - }, - delta: function(min, max, noTicks) { - return ((max - min) / noTicks) === Infinity ? (max / noTicks - min / noTicks) : (max - min) / noTicks - }, - multiply: function (a, b) { - return saturated.saturate(a * b); - }, - // returns c * bInt * a. 
Behaves properly in the case where c is negative - and bInt * a is bigger than Number.MAX_VALUE (Infinity) - multiplyAdd: function (a, bInt, c) { - if (isFinite(a * bInt)) { - return saturated.saturate(a * bInt + c); - } else { - var result = c; - - for (var i = 0; i < bInt; i++) { - result += a; - } - - return saturated.saturate(result); - } - }, - // round to nearby lower multiple of base - floorInBase: function(n, base) { - return base * Math.floor(n / base); - } - }; - - $.plot.saturated = saturated; -})(jQuery); diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.selection.js b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.selection.js deleted file mode 100644 index c14625a..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.selection.js +++ /dev/null @@ -1,504 +0,0 @@ -/* Flot plugin for selecting regions of a plot. - -Copyright (c) 2007-2014 IOLA and Ole Laursen. -Licensed under the MIT license. - -The plugin supports these options: - -selection: { - mode: null or "x" or "y" or "xy" or "smart", - color: color, - shape: "round" or "miter" or "bevel", - minSize: number of pixels -} - -Selection support is enabled by setting the mode to one of "x", "y" or "xy". -In "x" mode, the user will only be able to specify the x range, similarly for -"y" mode. For "xy", the selection becomes a rectangle where both ranges can be -specified. "color" is the color of the selection (if you need to change the color -later on, you can get to it with plot.getOptions().selection.color). "shape" -is the shape of the corners of the selection. - -"minSize" is the minimum size a selection can be in pixels. This value can -be customized to determine the smallest size a selection can be and still -have the selection rectangle be displayed. When customizing this value, the -fact that it refers to pixels, not axis units, must be taken into account. -Thus, for example, if there is a bar graph in time mode with barWidth set to 1 -minute, setting "minSize" to 1 will not make the minimum selection size 1 -minute, but rather 1 pixel. Note also that setting "minSize" to 0 will prevent -"plotunselected" events from being fired when the user clicks the mouse without -dragging. - -When selection support is enabled, a "plotselected" event will be emitted on -the DOM element you passed into the plot function. The event handler gets a -parameter with the ranges selected on the axes, like this: - - placeholder.bind( "plotselected", function( event, ranges ) { - alert("You selected " + ranges.xaxis.from + " to " + ranges.xaxis.to) - // similar for yaxis - with multiple axes, the extra ones are in - // x2axis, x3axis, ... - }); - -The "plotselected" event is only fired when the user has finished making the -selection. A "plotselecting" event is fired during the process with the same -parameters as the "plotselected" event, in case you want to know what's -happening while it's happening. - -A "plotunselected" event with no arguments is emitted when the user clicks the -mouse to remove the selection. As stated above, setting "minSize" to 0 will -disable this behavior. - -The plugin also adds the following methods to the plot object: - -- setSelection( ranges, preventEvent ) - - Set the selection rectangle. The ranges passed in are in the same form as - returned in the "plotselected" event.
If the selection mode is "x", you - should put in either an xaxis range, if the mode is "y" you need to put in - an yaxis range and both xaxis and yaxis if the selection mode is "xy", like - this: - - setSelection({ xaxis: { from: 0, to: 10 }, yaxis: { from: 40, to: 60 } }); - - setSelection will trigger the "plotselected" event when called. If you don't - want that to happen, e.g. if you're inside a "plotselected" handler, pass - true as the second parameter. If you are using multiple axes, you can - specify the ranges on any of those, e.g. as x2axis/x3axis/... instead of - xaxis, the plugin picks the first one it sees. - -- clearSelection( preventEvent ) - - Clear the selection rectangle. Pass in true to avoid getting a - "plotunselected" event. - -- getSelection() - - Returns the current selection in the same format as the "plotselected" - event. If there's currently no selection, the function returns null. - -*/ - -(function ($) { - function init(plot) { - var selection = { - first: {x: -1, y: -1}, - second: {x: -1, y: -1}, - show: false, - currentMode: 'xy', - active: false - }; - - var SNAPPING_CONSTANT = $.plot.uiConstants.SNAPPING_CONSTANT; - - // FIXME: The drag handling implemented here should be - // abstracted out, there's some similar code from a library in - // the navigation plugin, this should be massaged a bit to fit - // the Flot cases here better and reused. Doing this would - // make this plugin much slimmer. - var savedhandlers = {}; - - var mouseUpHandler = null; - - function onMouseMove(e) { - if (selection.active) { - updateSelection(e); - - plot.getPlaceholder().trigger("plotselecting", [ getSelection() ]); - } - } - - function onMouseDown(e) { - // only accept left-click - if (e.which !== 1) return; - - // cancel out any text selections - document.body.focus(); - - // prevent text selection and drag in old-school browsers - if (document.onselectstart !== undefined && savedhandlers.onselectstart == null) { - savedhandlers.onselectstart = document.onselectstart; - document.onselectstart = function () { return false; }; - } - if (document.ondrag !== undefined && savedhandlers.ondrag == null) { - savedhandlers.ondrag = document.ondrag; - document.ondrag = function () { return false; }; - } - - setSelectionPos(selection.first, e); - - selection.active = true; - - // this is a bit silly, but we have to use a closure to be - // able to whack the same handler again - mouseUpHandler = function (e) { onMouseUp(e); }; - - $(document).one("mouseup", mouseUpHandler); - } - - function onMouseUp(e) { - mouseUpHandler = null; - - // revert drag stuff for old-school browsers - if (document.onselectstart !== undefined) { - document.onselectstart = savedhandlers.onselectstart; - } - - if (document.ondrag !== undefined) { - document.ondrag = savedhandlers.ondrag; - } - - // no more dragging - selection.active = false; - updateSelection(e); - - if (selectionIsSane()) { - triggerSelectedEvent(); - } else { - // this counts as a clear - plot.getPlaceholder().trigger("plotunselected", [ ]); - plot.getPlaceholder().trigger("plotselecting", [ null ]); - } - - return false; - } - - function getSelection() { - if (!selectionIsSane()) return null; - - if (!selection.show) return null; - - var r = {}, - c1 = {x: selection.first.x, y: selection.first.y}, - c2 = {x: selection.second.x, y: selection.second.y}; - - if (selectionDirection(plot) === 'x') { - c1.y = 0; - c2.y = plot.height(); - } - - if (selectionDirection(plot) === 'y') { - c1.x = 0; - c2.x = plot.width(); - } - - 
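-             // Convert both selection corners from canvas pixels to axis
-             // units via axis.c2p(), producing a { from, to } range for
-             // every axis in use.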
$.each(plot.getAxes(), function (name, axis) { - if (axis.used) { - var p1 = axis.c2p(c1[axis.direction]), p2 = axis.c2p(c2[axis.direction]); - r[name] = { from: Math.min(p1, p2), to: Math.max(p1, p2) }; - } - }); - return r; - } - - function triggerSelectedEvent() { - var r = getSelection(); - - plot.getPlaceholder().trigger("plotselected", [ r ]); - - // backwards-compat stuff, to be removed in future - if (r.xaxis && r.yaxis) { - plot.getPlaceholder().trigger("selected", [ { x1: r.xaxis.from, y1: r.yaxis.from, x2: r.xaxis.to, y2: r.yaxis.to } ]); - } - } - - function clamp(min, value, max) { - return value < min ? min : (value > max ? max : value); - } - - function selectionDirection(plot) { - var o = plot.getOptions(); - - if (o.selection.mode === 'smart') { - return selection.currentMode; - } else { - return o.selection.mode; - } - } - - function updateMode(pos) { - if (selection.first) { - var delta = { - x: pos.x - selection.first.x, - y: pos.y - selection.first.y - }; - - if (Math.abs(delta.x) < SNAPPING_CONSTANT) { - selection.currentMode = 'y'; - } else if (Math.abs(delta.y) < SNAPPING_CONSTANT) { - selection.currentMode = 'x'; - } else { - selection.currentMode = 'xy'; - } - } - } - - function setSelectionPos(pos, e) { - var offset = plot.getPlaceholder().offset(); - var plotOffset = plot.getPlotOffset(); - pos.x = clamp(0, e.pageX - offset.left - plotOffset.left, plot.width()); - pos.y = clamp(0, e.pageY - offset.top - plotOffset.top, plot.height()); - - if (pos !== selection.first) updateMode(pos); - - if (selectionDirection(plot) === "y") { - pos.x = pos === selection.first ? 0 : plot.width(); - } - - if (selectionDirection(plot) === "x") { - pos.y = pos === selection.first ? 0 : plot.height(); - } - } - - function updateSelection(pos) { - if (pos.pageX == null) return; - - setSelectionPos(selection.second, pos); - if (selectionIsSane()) { - selection.show = true; - plot.triggerRedrawOverlay(); - } else clearSelection(true); - } - - function clearSelection(preventEvent) { - if (selection.show) { - selection.show = false; - selection.currentMode = ''; - plot.triggerRedrawOverlay(); - if (!preventEvent) { - plot.getPlaceholder().trigger("plotunselected", [ ]); - } - } - } - - // function taken from markings support in Flot - function extractRange(ranges, coord) { - var axis, from, to, key, axes = plot.getAxes(); - - for (var k in axes) { - axis = axes[k]; - if (axis.direction === coord) { - key = coord + axis.n + "axis"; - if (!ranges[key] && axis.n === 1) { - // support x1axis as xaxis - key = coord + "axis"; - } - - if (ranges[key]) { - from = ranges[key].from; - to = ranges[key].to; - break; - } - } - } - - // backwards-compat stuff - to be removed in future - if (!ranges[key]) { - axis = coord === "x" ? 
plot.getXAxes()[0] : plot.getYAxes()[0]; - from = ranges[coord + "1"]; - to = ranges[coord + "2"]; - } - - // auto-reverse as an added bonus - if (from != null && to != null && from > to) { - var tmp = from; - from = to; - to = tmp; - } - - return { from: from, to: to, axis: axis }; - } - - function setSelection(ranges, preventEvent) { - var range; - - if (selectionDirection(plot) === "y") { - selection.first.x = 0; - selection.second.x = plot.width(); - } else { - range = extractRange(ranges, "x"); - selection.first.x = range.axis.p2c(range.from); - selection.second.x = range.axis.p2c(range.to); - } - - if (selectionDirection(plot) === "x") { - selection.first.y = 0; - selection.second.y = plot.height(); - } else { - range = extractRange(ranges, "y"); - selection.first.y = range.axis.p2c(range.from); - selection.second.y = range.axis.p2c(range.to); - } - - selection.show = true; - plot.triggerRedrawOverlay(); - if (!preventEvent && selectionIsSane()) { - triggerSelectedEvent(); - } - } - - function selectionIsSane() { - var minSize = plot.getOptions().selection.minSize; - return Math.abs(selection.second.x - selection.first.x) >= minSize && - Math.abs(selection.second.y - selection.first.y) >= minSize; - } - - plot.clearSelection = clearSelection; - plot.setSelection = setSelection; - plot.getSelection = getSelection; - - plot.hooks.bindEvents.push(function(plot, eventHolder) { - var o = plot.getOptions(); - if (o.selection.mode != null) { - eventHolder.mousemove(onMouseMove); - eventHolder.mousedown(onMouseDown); - } - }); - - function drawSelectionDecorations(ctx, x, y, w, h, oX, oY, mode) { - var spacing = 3; - var fullEarWidth = 15; - var earWidth = Math.max(0, Math.min(fullEarWidth, w / 2 - 2, h / 2 - 2)); - ctx.fillStyle = '#ffffff'; - - if (mode === 'xy') { - ctx.beginPath(); - ctx.moveTo(x, y + earWidth); - ctx.lineTo(x - 3, y + earWidth); - ctx.lineTo(x - 3, y - 3); - ctx.lineTo(x + earWidth, y - 3); - ctx.lineTo(x + earWidth, y); - ctx.lineTo(x, y); - ctx.closePath(); - - ctx.moveTo(x, y + h - earWidth); - ctx.lineTo(x - 3, y + h - earWidth); - ctx.lineTo(x - 3, y + h + 3); - ctx.lineTo(x + earWidth, y + h + 3); - ctx.lineTo(x + earWidth, y + h); - ctx.lineTo(x, y + h); - ctx.closePath(); - - ctx.moveTo(x + w, y + earWidth); - ctx.lineTo(x + w + 3, y + earWidth); - ctx.lineTo(x + w + 3, y - 3); - ctx.lineTo(x + w - earWidth, y - 3); - ctx.lineTo(x + w - earWidth, y); - ctx.lineTo(x + w, y); - ctx.closePath(); - - ctx.moveTo(x + w, y + h - earWidth); - ctx.lineTo(x + w + 3, y + h - earWidth); - ctx.lineTo(x + w + 3, y + h + 3); - ctx.lineTo(x + w - earWidth, y + h + 3); - ctx.lineTo(x + w - earWidth, y + h); - ctx.lineTo(x + w, y + h); - ctx.closePath(); - - ctx.stroke(); - ctx.fill(); - } - - x = oX; - y = oY; - - if (mode === 'x') { - ctx.beginPath(); - ctx.moveTo(x, y + fullEarWidth); - ctx.lineTo(x, y - fullEarWidth); - ctx.lineTo(x - spacing, y - fullEarWidth); - ctx.lineTo(x - spacing, y + fullEarWidth); - ctx.closePath(); - - ctx.moveTo(x + w, y + fullEarWidth); - ctx.lineTo(x + w, y - fullEarWidth); - ctx.lineTo(x + w + spacing, y - fullEarWidth); - ctx.lineTo(x + w + spacing, y + fullEarWidth); - ctx.closePath(); - ctx.stroke(); - ctx.fill(); - } - - if (mode === 'y') { - ctx.beginPath(); - - ctx.moveTo(x - fullEarWidth, y); - ctx.lineTo(x + fullEarWidth, y); - ctx.lineTo(x + fullEarWidth, y - spacing); - ctx.lineTo(x - fullEarWidth, y - spacing); - ctx.closePath(); - - ctx.moveTo(x - fullEarWidth, y + h); - ctx.lineTo(x + fullEarWidth, y + h); - ctx.lineTo(x + 
fullEarWidth, y + h + spacing); - ctx.lineTo(x - fullEarWidth, y + h + spacing); - ctx.closePath(); - ctx.stroke(); - ctx.fill(); - } - } - - plot.hooks.drawOverlay.push(function (plot, ctx) { - // draw selection - if (selection.show && selectionIsSane()) { - var plotOffset = plot.getPlotOffset(); - var o = plot.getOptions(); - - ctx.save(); - ctx.translate(plotOffset.left, plotOffset.top); - - var c = $.color.parse(o.selection.color); - - ctx.strokeStyle = c.scale('a', 1).toString(); - ctx.lineWidth = 1; - ctx.lineJoin = o.selection.shape; - ctx.fillStyle = c.scale('a', 0.4).toString(); - - var x = Math.min(selection.first.x, selection.second.x) + 0.5, - oX = x, - y = Math.min(selection.first.y, selection.second.y) + 0.5, - oY = y, - w = Math.abs(selection.second.x - selection.first.x) - 1, - h = Math.abs(selection.second.y - selection.first.y) - 1; - - if (selectionDirection(plot) === 'x') { - h += y; - y = 0; - } - - if (selectionDirection(plot) === 'y') { - w += x; - x = 0; - } - - ctx.fillRect(0, 0, plot.width(), plot.height()); - ctx.clearRect(x, y, w, h); - drawSelectionDecorations(ctx, x, y, w, h, oX, oY, selectionDirection(plot)); - - ctx.restore(); - } - }); - - plot.hooks.shutdown.push(function (plot, eventHolder) { - eventHolder.unbind("mousemove", onMouseMove); - eventHolder.unbind("mousedown", onMouseDown); - - if (mouseUpHandler) { - $(document).unbind("mouseup", mouseUpHandler); - } - }); - } - - $.plot.plugins.push({ - init: init, - options: { - selection: { - mode: null, // one of null, "x", "y" or "xy" - color: "#888888", - shape: "round", // one of "round", "miter", or "bevel" - minSize: 5 // minimum number of pixels - } - }, - name: 'selection', - version: '1.1' - }); -})(jQuery); diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.stack.js b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.stack.js deleted file mode 100644 index f4b88e7..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.stack.js +++ /dev/null @@ -1,220 +0,0 @@ -/* Flot plugin for stacking data sets rather than overlaying them. - -Copyright (c) 2007-2014 IOLA and Ole Laursen. -Licensed under the MIT license. - -The plugin assumes the data is sorted on x (or y if stacking horizontally). -For line charts, it is assumed that if a line has an undefined gap (from a -null point), then the line above it should have the same gap - insert zeros -instead of "null" if you want another behaviour. This also holds for the start -and end of the chart. Note that stacking a mix of positive and negative values -in most instances doesn't make sense (so it looks weird). - -Two or more series are stacked when their "stack" attribute is set to the same -key (which can be any number or string or just "true"). To specify the default -stack, you can set the stack option like this: - - series: { - stack: null/false, true, or a key (number/string) - } - -You can also specify it for a single series, like this: - - $.plot( $("#placeholder"), [{ - data: [ ... ], - stack: true - }]) - -The stacking order is determined by the order of the data series in the array -(later series end up on top of the previous). - -Internally, the plugin modifies the datapoints in each series, adding an -offset to the y value. For line series, extra data points are inserted through -interpolation. If there's a second y value, it's also adjusted (e.g for bar -charts or filled areas). 
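- 
- For illustration, a minimal sketch stacking two series under one key (the
- placeholder element and the d1/d2 data arrays are assumed):
- 
-     $.plot($("#placeholder"), [
-         { data: d1, stack: "traffic" },
-         { data: d2, stack: "traffic" }
-     ], {
-         series: { lines: { show: true, fill: true } }
-     });
- 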
- -*/ - -(function ($) { - var options = { - series: { stack: null } // or number/string - }; - - function init(plot) { - function findMatchingSeries(s, allseries) { - var res = null; - for (var i = 0; i < allseries.length; ++i) { - if (s === allseries[i]) break; - - if (allseries[i].stack === s.stack) { - res = allseries[i]; - } - } - - return res; - } - - function addBottomPoints (s, datapoints) { - var formattedPoints = []; - for (var i = 0; i < datapoints.points.length; i += 2) { - formattedPoints.push(datapoints.points[i]); - formattedPoints.push(datapoints.points[i + 1]); - formattedPoints.push(0); - } - - datapoints.format.push({ - x: false, - y: true, - number: true, - required: false, - computeRange: s.yaxis.options.autoScale !== 'none', - defaultValue: 0 - }); - datapoints.points = formattedPoints; - datapoints.pointsize = 3; - } - - function stackData(plot, s, datapoints) { - if (s.stack == null || s.stack === false) return; - - var needsBottom = s.bars.show || (s.lines.show && s.lines.fill); - var hasBottom = datapoints.pointsize > 2 && (horizontal ? datapoints.format[2].x : datapoints.format[2].y); - // Series data is missing bottom points - need to format - if (needsBottom && !hasBottom) { - addBottomPoints(s, datapoints); - } - - var other = findMatchingSeries(s, plot.getData()); - if (!other) return; - - var ps = datapoints.pointsize, - points = datapoints.points, - otherps = other.datapoints.pointsize, - otherpoints = other.datapoints.points, - newpoints = [], - px, py, intery, qx, qy, bottom, - withlines = s.lines.show, - horizontal = s.bars.horizontal, - withsteps = withlines && s.lines.steps, - fromgap = true, - keyOffset = horizontal ? 1 : 0, - accumulateOffset = horizontal ? 0 : 1, - i = 0, j = 0, l, m; - - while (true) { - if (i >= points.length) break; - - l = newpoints.length; - - if (points[i] == null) { - // copy gaps - for (m = 0; m < ps; ++m) { - newpoints.push(points[i + m]); - } - - i += ps; - } else if (j >= otherpoints.length) { - // for lines, we can't use the rest of the points - if (!withlines) { - for (m = 0; m < ps; ++m) { - newpoints.push(points[i + m]); - } - } - - i += ps; - } else if (otherpoints[j] == null) { - // oops, got a gap - for (m = 0; m < ps; ++m) { - newpoints.push(null); - } - - fromgap = true; - j += otherps; - } else { - // cases where we actually got two points - px = points[i + keyOffset]; - py = points[i + accumulateOffset]; - qx = otherpoints[j + keyOffset]; - qy = otherpoints[j + accumulateOffset]; - bottom = 0; - - if (px === qx) { - for (m = 0; m < ps; ++m) { - newpoints.push(points[i + m]); - } - - newpoints[l + accumulateOffset] += qy; - bottom = qy; - - i += ps; - j += otherps; - } else if (px > qx) { - // we got past point below, might need to - // insert interpolated extra point - if (withlines && i > 0 && points[i - ps] != null) { - intery = py + (points[i - ps + accumulateOffset] - py) * (qx - px) / (points[i - ps + keyOffset] - px); - newpoints.push(qx); - newpoints.push(intery + qy); - for (m = 2; m < ps; ++m) { - newpoints.push(points[i + m]); - } - - bottom = qy; - } - - j += otherps; - } else { // px < qx - if (fromgap && withlines) { - // if we come from a gap, we just skip this point - i += ps; - continue; - } - - for (m = 0; m < ps; ++m) { - newpoints.push(points[i + m]); - } - - // we might be able to interpolate a point below, - // this can give us a better y - if (withlines && j > 0 && otherpoints[j - otherps] != null) { - bottom = qy + (otherpoints[j - otherps + accumulateOffset] - qy) * (px - qx) / 
(otherpoints[j - otherps + keyOffset] - qx); - } - - newpoints[l + accumulateOffset] += bottom; - - i += ps; - } - - fromgap = false; - - if (l !== newpoints.length && needsBottom) { - newpoints[l + 2] += bottom; - } - } - - // maintain the line steps invariant - if (withsteps && l !== newpoints.length && l > 0 && - newpoints[l] !== null && - newpoints[l] !== newpoints[l - ps] && - newpoints[l + 1] !== newpoints[l - ps + 1]) { - for (m = 0; m < ps; ++m) { - newpoints[l + ps + m] = newpoints[l + m]; - } - - newpoints[l + 1] = newpoints[l - ps + 1]; - } - } - - datapoints.points = newpoints; - } - - plot.hooks.processDatapoints.push(stackData); - } - - $.plot.plugins.push({ - init: init, - options: options, - name: 'stack', - version: '1.2' - }); -})(jQuery); diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.symbol.js b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.symbol.js deleted file mode 100644 index 0e06513..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.symbol.js +++ /dev/null @@ -1,98 +0,0 @@ -/* Flot plugin that adds some extra symbols for plotting points. - -Copyright (c) 2007-2014 IOLA and Ole Laursen. -Licensed under the MIT license. - -The symbols are accessed as strings through the standard symbol options: - - series: { - points: { - symbol: "square" // or "diamond", "triangle", "cross", "plus", "ellipse", "rectangle" - } - } - -*/ - -(function ($) { - // we normalize the area of each symbol so it is approximately the - // same as a circle of the given radius - - var square = function (ctx, x, y, radius, shadow) { - // pi * r^2 = (2s)^2 => s = r * sqrt(pi)/2 - var size = radius * Math.sqrt(Math.PI) / 2; - ctx.rect(x - size, y - size, size + size, size + size); - }, - rectangle = function (ctx, x, y, radius, shadow) { - // pi * r^2 = (2s)^2 => s = r * sqrt(pi)/2 - var size = radius * Math.sqrt(Math.PI) / 2; - ctx.rect(x - size, y - size, size + size, size + size); - }, - diamond = function (ctx, x, y, radius, shadow) { - // pi * r^2 = 2s^2 => s = r * sqrt(pi/2) - var size = radius * Math.sqrt(Math.PI / 2); - ctx.moveTo(x - size, y); - ctx.lineTo(x, y - size); - ctx.lineTo(x + size, y); - ctx.lineTo(x, y + size); - ctx.lineTo(x - size, y); - ctx.lineTo(x, y - size); - }, - triangle = function (ctx, x, y, radius, shadow) { - // pi * r^2 = 1/2 * s^2 * sin (pi / 3) => s = r * sqrt(2 * pi / sin(pi / 3)) - var size = radius * Math.sqrt(2 * Math.PI / Math.sin(Math.PI / 3)); - var height = size * Math.sin(Math.PI / 3); - ctx.moveTo(x - size / 2, y + height / 2); - ctx.lineTo(x + size / 2, y + height / 2); - if (!shadow) { - ctx.lineTo(x, y - height / 2); - ctx.lineTo(x - size / 2, y + height / 2); - ctx.lineTo(x + size / 2, y + height / 2); - } - }, - cross = function (ctx, x, y, radius, shadow) { - // pi * r^2 = (2s)^2 => s = r * sqrt(pi)/2 - var size = radius * Math.sqrt(Math.PI) / 2; - ctx.moveTo(x - size, y - size); - ctx.lineTo(x + size, y + size); - ctx.moveTo(x - size, y + size); - ctx.lineTo(x + size, y - size); - }, - ellipse = function(ctx, x, y, radius, shadow, fill) { - if (!shadow) { - ctx.moveTo(x + radius, y); - ctx.arc(x, y, radius, 0, Math.PI * 2, false); - } - }, - plus = function (ctx, x, y, radius, shadow) { - var size = radius * Math.sqrt(Math.PI / 2); - ctx.moveTo(x - size, y); - ctx.lineTo(x + size, y); - ctx.moveTo(x, y + size); - ctx.lineTo(x, y - size); - }, - handlers = { - square: square, - rectangle: rectangle, - diamond: diamond, - triangle: 
triangle, - cross: cross, - ellipse: ellipse, - plus: plus - }; - - square.fill = true; - rectangle.fill = true; - diamond.fill = true; - triangle.fill = true; - ellipse.fill = true; - - function init(plot) { - plot.drawSymbol = handlers; - } - - $.plot.plugins.push({ - init: init, - name: 'symbols', - version: '1.0' - }); -})(jQuery); diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.threshold.js b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.threshold.js deleted file mode 100644 index db5a59c..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.threshold.js +++ /dev/null @@ -1,143 +0,0 @@ -/* Flot plugin for thresholding data. - -Copyright (c) 2007-2014 IOLA and Ole Laursen. -Licensed under the MIT license. - -The plugin supports these options: - - series: { - threshold: { - below: number - color: colorspec - } - } - -It can also be applied to a single series, like this: - - $.plot( $("#placeholder"), [{ - data: [ ... ], - threshold: { ... } - }]) - -An array can be passed for multiple thresholding, like this: - - threshold: [{ - below: number1 - color: color1 - },{ - below: number2 - color: color2 - }] - -These multiple threshold objects can be passed in any order since they are -sorted by the processing function. - -The data points below "below" are drawn with the specified color. This makes -it easy to mark points below 0, e.g. for budget data. - -Internally, the plugin works by splitting the data into two series, above and -below the threshold. The extra series below the threshold will have its label -cleared and the special "originSeries" attribute set to the original series. -You may need to check for this in hover events. - -*/ - -(function ($) { - var options = { - series: { threshold: null } // or { below: number, color: color spec} - }; - - function init(plot) { - function thresholdData(plot, s, datapoints, below, color) { - var ps = datapoints.pointsize, i, x, y, p, prevp, - thresholded = $.extend({}, s); // note: shallow copy - - thresholded.datapoints = { points: [], pointsize: ps, format: datapoints.format }; - thresholded.label = null; - thresholded.color = color; - thresholded.threshold = null; - thresholded.originSeries = s; - thresholded.data = []; - - var origpoints = datapoints.points, - addCrossingPoints = s.lines.show; - - var threspoints = []; - var newpoints = []; - var m; - - for (i = 0; i < origpoints.length; i += ps) { - x = origpoints[i]; - y = origpoints[i + 1]; - - prevp = p; - if (y < below) p = threspoints; - else p = newpoints; - - if (addCrossingPoints && prevp !== p && - x !== null && i > 0 && - origpoints[i - ps] != null) { - var interx = x + (below - y) * (x - origpoints[i - ps]) / (y - origpoints[i - ps + 1]); - prevp.push(interx); - prevp.push(below); - for (m = 2; m < ps; ++m) { - prevp.push(origpoints[i + m]); - } - - p.push(null); // start new segment - p.push(null); - for (m = 2; m < ps; ++m) { - p.push(origpoints[i + m]); - } - - p.push(interx); - p.push(below); - for (m = 2; m < ps; ++m) { - p.push(origpoints[i + m]); - } - } - - p.push(x); - p.push(y); - for (m = 2; m < ps; ++m) { - p.push(origpoints[i + m]); - } - } - - datapoints.points = newpoints; - thresholded.datapoints.points = threspoints; - - if (thresholded.datapoints.points.length > 0) { - var origIndex = $.inArray(s, plot.getData()); - // Insert newly-generated series right after original one (to prevent it from becoming top-most) - plot.getData().splice(origIndex + 1, 0, 
thresholded); - } - - // FIXME: there are probably some edge cases left in bars - } - - function processThresholds(plot, s, datapoints) { - if (!s.threshold) return; - if (s.threshold instanceof Array) { - s.threshold.sort(function(a, b) { - return a.below - b.below; - }); - - $(s.threshold).each(function(i, th) { - thresholdData(plot, s, datapoints, th.below, th.color); - }); - } else { - thresholdData(plot, s, datapoints, s.threshold.below, s.threshold.color); - } - } - - plot.hooks.processDatapoints.push(processThresholds); - } - - $.plot.plugins.push({ - init: init, - options: options, - name: 'threshold', - version: '1.2' - }); -})(jQuery); diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.time.js b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.time.js deleted file mode 100644 index 3dac995..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.time.js +++ /dev/null @@ -1,460 +0,0 @@ -/* Pretty handling of time axes. - -Copyright (c) 2007-2014 IOLA and Ole Laursen. -Licensed under the MIT license. - -Set axis.mode to "time" to enable. See the section "Time series data" in -API.txt for details. -*/ - -(function($) { - 'use strict'; - - var options = { - xaxis: { - timezone: null, // "browser" for local to the client or timezone for timezone-js - timeformat: null, // format string to use - twelveHourClock: false, // 12 or 24 time in time mode - monthNames: null, // list of names of months - timeBase: 'seconds' // are the values in milliseconds or seconds - }, - yaxis: { - timeBase: 'seconds' - } - }; - - var floorInBase = $.plot.saturated.floorInBase; - - // Returns a string with the date d formatted according to fmt. - // A subset of the Open Group's strftime format is supported. - - function formatDate(d, fmt, monthNames, dayNames) { - if (typeof d.strftime === "function") { - return d.strftime(fmt); - } - - var leftPad = function(n, pad) { - n = "" + n; - pad = "" + (pad == null ? "0" : pad); - return n.length == 1 ? pad + n : n; - }; - - var r = []; - var escape = false; - var hours = d.getHours(); - var isAM = hours < 12; - - if (!monthNames) { - monthNames = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]; - } - - if (!dayNames) { - dayNames = ["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"]; - } - - var hours12; - if (hours > 12) { - hours12 = hours - 12; - } else if (hours == 0) { - hours12 = 12; - } else { - hours12 = hours; - } - - for (var i = 0; i < fmt.length; ++i) { - var c = fmt.charAt(i); - - if (escape) { - switch (c) { - case 'a': c = "" + dayNames[d.getDay()]; break; - case 'b': c = "" + monthNames[d.getMonth()]; break; - case 'd': c = leftPad(d.getDate()); break; - case 'e': c = leftPad(d.getDate(), " "); break; - case 'h': // For back-compat with 0.7; remove in 1.0 - case 'H': c = leftPad(hours); break; - case 'I': c = leftPad(hours12); break; - case 'l': c = leftPad(hours12, " "); break; - case 'm': c = leftPad(d.getMonth() + 1); break; - case 'M': c = leftPad(d.getMinutes()); break; - // quarters not in Open Group's strftime specification - case 'q': - c = "" + (Math.floor(d.getMonth() / 3) + 1); break; - case 'S': c = leftPad(d.getSeconds()); break; - case 'y': c = leftPad(d.getFullYear() % 100); break; - case 'Y': c = "" + d.getFullYear(); break; - case 'p': c = (isAM) ? ("" + "am") : ("" + "pm"); break; - case 'P': c = (isAM) ? 
("" + "AM") : ("" + "PM"); break; - case 'w': c = "" + d.getDay(); break; - } - r.push(c); - escape = false; - } else { - if (c == "%") { - escape = true; - } else { - r.push(c); - } - } - } - - return r.join(""); - } - - // To have a consistent view of time-based data independent of which time - // zone the client happens to be in we need a date-like object independent - // of time zones. This is done through a wrapper that only calls the UTC - // versions of the accessor methods. - - function makeUtcWrapper(d) { - function addProxyMethod(sourceObj, sourceMethod, targetObj, targetMethod) { - sourceObj[sourceMethod] = function() { - return targetObj[targetMethod].apply(targetObj, arguments); - }; - } - - var utc = { - date: d - }; - - // support strftime, if found - if (d.strftime !== undefined) { - addProxyMethod(utc, "strftime", d, "strftime"); - } - - addProxyMethod(utc, "getTime", d, "getTime"); - addProxyMethod(utc, "setTime", d, "setTime"); - - var props = ["Date", "Day", "FullYear", "Hours", "Milliseconds", "Minutes", "Month", "Seconds"]; - - for (var p = 0; p < props.length; p++) { - addProxyMethod(utc, "get" + props[p], d, "getUTC" + props[p]); - addProxyMethod(utc, "set" + props[p], d, "setUTC" + props[p]); - } - - return utc; - } - - // select time zone strategy. This returns a date-like object tied to the - // desired timezone - function dateGenerator(ts, opts) { - var maxDateValue = 8640000000000000; - - if (opts && opts.timeBase === 'seconds') { - ts *= 1000; - } - - if (ts > maxDateValue) { - ts = maxDateValue; - } else if (ts < -maxDateValue) { - ts = -maxDateValue; - } - - if (opts.timezone === "browser") { - return new Date(ts); - } else if (!opts.timezone || opts.timezone === "utc") { - return makeUtcWrapper(new Date(ts)); - } else if (typeof timezoneJS !== "undefined" && typeof timezoneJS.Date !== "undefined") { - var d = new timezoneJS.Date(); - // timezone-js is fickle, so be sure to set the time zone before - // setting the time. - d.setTimezone(opts.timezone); - d.setTime(ts); - return d; - } else { - return makeUtcWrapper(new Date(ts)); - } - } - - // map of app. 
size of time units in milliseconds - var timeUnitSizeSeconds = { - "millisecond": 0.001, - "second": 1, - "minute": 60, - "hour": 60 * 60, - "day": 24 * 60 * 60, - "month": 30 * 24 * 60 * 60, - "quarter": 3 * 30 * 24 * 60 * 60, - "year": 365.2425 * 24 * 60 * 60 - }; - - var timeUnitSizeMilliseconds = { - "millisecond": 1, - "second": 1000, - "minute": 60 * 1000, - "hour": 60 * 60 * 1000, - "day": 24 * 60 * 60 * 1000, - "month": 30 * 24 * 60 * 60 * 1000, - "quarter": 3 * 30 * 24 * 60 * 60 * 1000, - "year": 365.2425 * 24 * 60 * 60 * 1000 - }; - - // the allowed tick sizes, after 1 year we use - // an integer algorithm - - var baseSpec = [ - [1, "millisecond"], [2, "millisecond"], [5, "millisecond"], [10, "millisecond"], - [25, "millisecond"], [50, "millisecond"], [100, "millisecond"], [250, "millisecond"], [500, "millisecond"], - [1, "second"], [2, "second"], [5, "second"], [10, "second"], - [30, "second"], - [1, "minute"], [2, "minute"], [5, "minute"], [10, "minute"], - [30, "minute"], - [1, "hour"], [2, "hour"], [4, "hour"], - [8, "hour"], [12, "hour"], - [1, "day"], [2, "day"], [3, "day"], - [0.25, "month"], [0.5, "month"], [1, "month"], - [2, "month"] - ]; - - // we don't know which variant(s) we'll need yet, but generating both is - // cheap - - var specMonths = baseSpec.concat([[3, "month"], [6, "month"], - [1, "year"]]); - var specQuarters = baseSpec.concat([[1, "quarter"], [2, "quarter"], - [1, "year"]]); - - - function dateTickGenerator(axis) { - var opts = axis.options, - ticks = [], - d = dateGenerator(axis.min, opts), - minSize = 0; - - // make quarter use a possibility if quarters are - // mentioned in either of these options - var spec = (opts.tickSize && opts.tickSize[1] === - "quarter") || - (opts.minTickSize && opts.minTickSize[1] === - "quarter") ? specQuarters : specMonths; - - var timeUnitSize = opts.timeBase === 'seconds' ? 
timeUnitSizeSeconds : timeUnitSizeMilliseconds; - - if (opts.minTickSize !== null && opts.minTickSize !== undefined) { - if (typeof opts.tickSize === "number") { - minSize = opts.tickSize; - } else { - minSize = opts.minTickSize[0] * timeUnitSize[opts.minTickSize[1]]; - } - } - - for (var i = 0; i < spec.length - 1; ++i) { - if (axis.delta < (spec[i][0] * timeUnitSize[spec[i][1]] + - spec[i + 1][0] * timeUnitSize[spec[i + 1][1]]) / 2 && - spec[i][0] * timeUnitSize[spec[i][1]] >= minSize) { - break; - } - } - - var size = spec[i][0]; - var unit = spec[i][1]; - // special-case the possibility of several years - if (unit === "year") { - // if given a minTickSize in years, just use it, - // ensuring that it's an integer - - if (opts.minTickSize !== null && opts.minTickSize !== undefined && opts.minTickSize[1] === "year") { - size = Math.floor(opts.minTickSize[0]); - } else { - var magn = Math.pow(10, Math.floor(Math.log(axis.delta / timeUnitSize.year) / Math.LN10)); - var norm = (axis.delta / timeUnitSize.year) / magn; - - if (norm < 1.5) { - size = 1; - } else if (norm < 3) { - size = 2; - } else if (norm < 7.5) { - size = 5; - } else { - size = 10; - } - - size *= magn; - } - - // minimum size for years is 1 - - if (size < 1) { - size = 1; - } - } - - axis.tickSize = opts.tickSize || [size, unit]; - var tickSize = axis.tickSize[0]; - unit = axis.tickSize[1]; - - var step = tickSize * timeUnitSize[unit]; - - if (unit === "millisecond") { - d.setMilliseconds(floorInBase(d.getMilliseconds(), tickSize)); - } else if (unit === "second") { - d.setSeconds(floorInBase(d.getSeconds(), tickSize)); - } else if (unit === "minute") { - d.setMinutes(floorInBase(d.getMinutes(), tickSize)); - } else if (unit === "hour") { - d.setHours(floorInBase(d.getHours(), tickSize)); - } else if (unit === "month") { - d.setMonth(floorInBase(d.getMonth(), tickSize)); - } else if (unit === "quarter") { - d.setMonth(3 * floorInBase(d.getMonth() / 3, - tickSize)); - } else if (unit === "year") { - d.setFullYear(floorInBase(d.getFullYear(), tickSize)); - } - - // reset smaller components - - if (step >= timeUnitSize.second) { - d.setMilliseconds(0); - } - - if (step >= timeUnitSize.minute) { - d.setSeconds(0); - } - if (step >= timeUnitSize.hour) { - d.setMinutes(0); - } - if (step >= timeUnitSize.day) { - d.setHours(0); - } - if (step >= timeUnitSize.day * 4) { - d.setDate(1); - } - if (step >= timeUnitSize.month * 2) { - d.setMonth(floorInBase(d.getMonth(), 3)); - } - if (step >= timeUnitSize.quarter * 2) { - d.setMonth(floorInBase(d.getMonth(), 6)); - } - if (step >= timeUnitSize.year) { - d.setMonth(0); - } - - var carry = 0; - var v = Number.NaN; - var v1000; - var prev; - do { - prev = v; - v1000 = d.getTime(); - if (opts && opts.timeBase === 'seconds') { - v = v1000 / 1000; - } else { - v = v1000; - } - - ticks.push(v); - - if (unit === "month" || unit === "quarter") { - if (tickSize < 1) { - // a bit complicated - we'll divide the - // month/quarter up but we need to take - // care of fractions so we don't end up in - // the middle of a day - d.setDate(1); - var start = d.getTime(); - d.setMonth(d.getMonth() + - (unit === "quarter" ? 3 : 1)); - var end = d.getTime(); - d.setTime((v + carry * timeUnitSize.hour + (end - start) * tickSize)); - carry = d.getHours(); - d.setHours(0); - } else { - d.setMonth(d.getMonth() + - tickSize * (unit === "quarter" ? 
3 : 1)); - } - } else if (unit === "year") { - d.setFullYear(d.getFullYear() + tickSize); - } else { - if (opts.timeBase === 'seconds') { - d.setTime((v + step) * 1000); - } else { - d.setTime(v + step); - } - } - } while (v < axis.max && v !== prev); - - return ticks; - }; - - function init(plot) { - plot.hooks.processOptions.push(function (plot) { - $.each(plot.getAxes(), function(axisName, axis) { - var opts = axis.options; - if (opts.mode === "time") { - axis.tickGenerator = dateTickGenerator; - - axis.tickFormatter = function (v, axis) { - var d = dateGenerator(v, axis.options); - - // first check global format - if (opts.timeformat != null) { - return formatDate(d, opts.timeformat, opts.monthNames, opts.dayNames); - } - - // possibly use quarters if quarters are mentioned in - // any of these places - var useQuarters = (axis.options.tickSize && - axis.options.tickSize[1] == "quarter") || - (axis.options.minTickSize && - axis.options.minTickSize[1] == "quarter"); - - var timeUnitSize = opts.timeBase === 'seconds' ? timeUnitSizeSeconds : timeUnitSizeMilliseconds; - - var t = axis.tickSize[0] * timeUnitSize[axis.tickSize[1]]; - var span = axis.max - axis.min; - var suffix = (opts.twelveHourClock) ? " %p" : ""; - var hourCode = (opts.twelveHourClock) ? "%I" : "%H"; - var fmt; - - if (t < timeUnitSize.minute) { - fmt = hourCode + ":%M:%S" + suffix; - } else if (t < timeUnitSize.day) { - if (span < 2 * timeUnitSize.day) { - fmt = hourCode + ":%M" + suffix; - } else { - fmt = "%b %d " + hourCode + ":%M" + suffix; - } - } else if (t < timeUnitSize.month) { - fmt = "%b %d"; - } else if ((useQuarters && t < timeUnitSize.quarter) || - (!useQuarters && t < timeUnitSize.year)) { - if (span < timeUnitSize.year) { - fmt = "%b"; - } else { - fmt = "%b %Y"; - } - } else if (useQuarters && t < timeUnitSize.year) { - if (span < timeUnitSize.year) { - fmt = "Q%q"; - } else { - fmt = "Q%q %Y"; - } - } else { - fmt = "%Y"; - } - - var rt = formatDate(d, fmt, opts.monthNames, opts.dayNames); - - return rt; - }; - } - }); - }); - } - - $.plot.plugins.push({ - init: init, - options: options, - name: 'time', - version: '1.0' - }); - - // Time-axis support used to be in Flot core, which exposed the - // formatDate function on the plot object. Various plugins depend - // on the function, so we need to re-expose it here. 
- - $.plot.formatDate = formatDate; - $.plot.dateGenerator = dateGenerator; - $.plot.dateTickGenerator = dateTickGenerator; - $.plot.makeUtcWrapper = makeUtcWrapper; -})(jQuery); diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.touch.js b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.touch.js deleted file mode 100644 index e5153bf..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.touch.js +++ /dev/null @@ -1,322 +0,0 @@ - -/* global jQuery */ - -(function($) { - 'use strict'; - - var options = { - pan: { - enableTouch: false - } - }; - - function init(plot) { - plot.hooks.processOptions.push(initTouchNavigation); - } - - function initTouchNavigation(plot, options) { - var gestureState = { - twoTouches: false, - currentTapStart: { x: 0, y: 0 }, - currentTapEnd: { x: 0, y: 0 }, - prevTap: { x: 0, y: 0 }, - currentTap: { x: 0, y: 0 }, - interceptedLongTap: false, - allowEventPropagation: false, - prevTapTime: null, - tapStartTime: null, - longTapTriggerId: null - }, - maxDistanceBetweenTaps = 20, - maxIntervalBetweenTaps = 500, - maxLongTapDistance = 20, - minLongTapDuration = 1500, - pressedTapDuration = 125, - mainEventHolder; - - function interpretGestures(e) { - var o = plot.getOptions(); - - if (!o.pan.active && !o.zoom.active) { - return; - } - - updateOnMultipleTouches(e); - mainEventHolder.dispatchEvent(new CustomEvent('touchevent', { detail: e })); - - if (isPinchEvent(e)) { - executeAction(e, 'pinch'); - } else { - executeAction(e, 'pan'); - if (!wasPinchEvent(e)) { - if (isDoubleTap(e)) { - executeAction(e, 'doubleTap'); - } - executeAction(e, 'tap'); - executeAction(e, 'longTap'); - } - } - } - - function executeAction(e, gesture) { - switch (gesture) { - case 'pan': - pan[e.type](e); - break; - case 'pinch': - pinch[e.type](e); - break; - case 'doubleTap': - doubleTap.onDoubleTap(e); - break; - case 'longTap': - longTap[e.type](e); - break; - case 'tap': - tap[e.type](e); - break; - default: - break; - } - } - - function bindEvents(plot, eventHolder) { - mainEventHolder = eventHolder[0]; - eventHolder[0].addEventListener('touchstart', interpretGestures, false); - eventHolder[0].addEventListener('touchmove', interpretGestures, false); - eventHolder[0].addEventListener('touchend', interpretGestures, false); - } - - function shutdown(plot, eventHolder) { - eventHolder[0].removeEventListener('touchstart', interpretGestures); - eventHolder[0].removeEventListener('touchmove', interpretGestures); - eventHolder[0].removeEventListener('touchend', interpretGestures); - if (gestureState.longTapTriggerId) { - clearTimeout(gestureState.longTapTriggerId); - gestureState.longTapTriggerId = null; - } - } - - var pan = { - touchstart: function(e) { - updatePrevForDoubleTap(); - updateCurrentForDoubleTap(e); - updateStateForLongTapStart(e); - - mainEventHolder.dispatchEvent(new CustomEvent('panstart', { detail: e })); - }, - - touchmove: function(e) { - preventEventPropagation(e); - - updateCurrentForDoubleTap(e); - updateStateForLongTapEnd(e); - - if (!gestureState.allowEventPropagation) { - mainEventHolder.dispatchEvent(new CustomEvent('pandrag', { detail: e })); - } - }, - - touchend: function(e) { - preventEventPropagation(e); - - if (wasPinchEvent(e)) { - mainEventHolder.dispatchEvent(new CustomEvent('pinchend', { detail: e })); - mainEventHolder.dispatchEvent(new CustomEvent('panstart', { detail: e })); - } else if (noTouchActive(e)) { - mainEventHolder.dispatchEvent(new 
CustomEvent('panend', { detail: e })); - } - } - }; - - var pinch = { - touchstart: function(e) { - mainEventHolder.dispatchEvent(new CustomEvent('pinchstart', { detail: e })); - }, - - touchmove: function(e) { - preventEventPropagation(e); - gestureState.twoTouches = isPinchEvent(e); - if (!gestureState.allowEventPropagation) { - mainEventHolder.dispatchEvent(new CustomEvent('pinchdrag', { detail: e })); - } - }, - - touchend: function(e) { - preventEventPropagation(e); - } - }; - - var doubleTap = { - onDoubleTap: function(e) { - preventEventPropagation(e); - mainEventHolder.dispatchEvent(new CustomEvent('doubletap', { detail: e })); - } - }; - - var longTap = { - touchstart: function(e) { - longTap.waitForLongTap(e); - }, - - touchmove: function(e) { - }, - - touchend: function(e) { - if (gestureState.longTapTriggerId) { - clearTimeout(gestureState.longTapTriggerId); - gestureState.longTapTriggerId = null; - } - }, - - isLongTap: function(e) { - var currentTime = new Date().getTime(), - tapDuration = currentTime - gestureState.tapStartTime; - if (tapDuration >= minLongTapDuration && !gestureState.interceptedLongTap) { - if (distance(gestureState.currentTapStart.x, gestureState.currentTapStart.y, gestureState.currentTapEnd.x, gestureState.currentTapEnd.y) < maxLongTapDistance) { - gestureState.interceptedLongTap = true; - return true; - } - } - return false; - }, - - waitForLongTap: function(e) { - var longTapTrigger = function() { - if (longTap.isLongTap(e)) { - mainEventHolder.dispatchEvent(new CustomEvent('longtap', { detail: e })); - } - gestureState.longTapTriggerId = null; - }; - if (!gestureState.longTapTriggerId) { - gestureState.longTapTriggerId = setTimeout(longTapTrigger, minLongTapDuration); - } - } - }; - - var tap = { - touchstart: function(e) { - gestureState.tapStartTime = new Date().getTime(); - }, - - touchmove: function(e) { - }, - - touchend: function(e) { - if (tap.isTap(e)) { - mainEventHolder.dispatchEvent(new CustomEvent('tap', { detail: e })); - preventEventPropagation(e); - } - }, - - isTap: function(e) { - var currentTime = new Date().getTime(), - tapDuration = currentTime - gestureState.tapStartTime; - if (tapDuration <= pressedTapDuration) { - if (distance(gestureState.currentTapStart.x, gestureState.currentTapStart.y, gestureState.currentTapEnd.x, gestureState.currentTapEnd.y) < maxLongTapDistance) { - return true; - } - } - return false; - } - }; - - if (options.pan.enableTouch === true) { - plot.hooks.bindEvents.push(bindEvents); - plot.hooks.shutdown.push(shutdown); - }; - - function updatePrevForDoubleTap() { - gestureState.prevTap = { - x: gestureState.currentTap.x, - y: gestureState.currentTap.y - }; - }; - - function updateCurrentForDoubleTap(e) { - gestureState.currentTap = { - x: e.touches[0].pageX, - y: e.touches[0].pageY - }; - } - - function updateStateForLongTapStart(e) { - gestureState.tapStartTime = new Date().getTime(); - gestureState.interceptedLongTap = false; - gestureState.currentTapStart = { - x: e.touches[0].pageX, - y: e.touches[0].pageY - }; - gestureState.currentTapEnd = { - x: e.touches[0].pageX, - y: e.touches[0].pageY - }; - }; - - function updateStateForLongTapEnd(e) { - gestureState.currentTapEnd = { - x: e.touches[0].pageX, - y: e.touches[0].pageY - }; - }; - - function isDoubleTap(e) { - var currentTime = new Date().getTime(), - intervalBetweenTaps = currentTime - gestureState.prevTapTime; - - if (intervalBetweenTaps >= 0 && intervalBetweenTaps < maxIntervalBetweenTaps) { - if (distance(gestureState.prevTap.x, 
gestureState.prevTap.y, gestureState.currentTap.x, gestureState.currentTap.y) < maxDistanceBetweenTaps) { - e.firstTouch = gestureState.prevTap; - e.secondTouch = gestureState.currentTap; - return true; - } - } - gestureState.prevTapTime = currentTime; - return false; - } - - function preventEventPropagation(e) { - if (!gestureState.allowEventPropagation) { - e.preventDefault(); - e.stopPropagation(); - } - } - - function distance(x1, y1, x2, y2) { - return Math.sqrt((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2)); - } - - function noTouchActive(e) { - return (e.touches && e.touches.length === 0); - } - - function wasPinchEvent(e) { - return (gestureState.twoTouches && e.touches.length === 1); - } - - function updateOnMultipleTouches(e) { - if (e.touches.length >= 3) { - gestureState.allowEventPropagation = true; - } else { - gestureState.allowEventPropagation = false; - } - } - - function isPinchEvent(e) { - if (e.touches && e.touches.length >= 2) { - if (e.touches[0].target === plot.getEventHolder() && - e.touches[1].target === plot.getEventHolder()) { - return true; - } - } - return false; - } - } - - $.plot.plugins.push({ - init: init, - options: options, - name: 'navigateTouch', - version: '0.3' - }); -})(jQuery); diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.touchNavigate.js b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.touchNavigate.js deleted file mode 100644 index b467b2e..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.touchNavigate.js +++ /dev/null @@ -1,340 +0,0 @@ -/* global jQuery */ - -(function($) { - 'use strict'; - - var options = { - pan: { - enableTouch: false, - touchMode: 'manual' - } - }; - - var ZOOM_DISTANCE_MARGIN = $.plot.uiConstants.ZOOM_DISTANCE_MARGIN; - - function init(plot) { - plot.hooks.processOptions.push(initTouchNavigation); - } - - function initTouchNavigation(plot, options) { - var gestureState = { - zoomEnable: false, - prevDistance: null, - prevTapTime: 0, - prevPanPosition: { x: 0, y: 0 }, - prevTapPosition: { x: 0, y: 0 } - }, - navigationState = { - prevTouchedAxis: 'none', - currentTouchedAxis: 'none', - touchedAxis: null, - navigationConstraint: 'unconstrained', - initialState: null, - }, - useManualPan = options.pan.touchMode === 'manual', - smartPanLock = options.pan.touchMode === 'smartLock', - useSmartPan = smartPanLock || options.pan.touchMode === 'smart', - pan, pinch, doubleTap; - - function bindEvents(plot, eventHolder) { - var o = plot.getOptions(); - - if (o.pan.interactive) { - eventHolder[0].addEventListener('panstart', pan.start, false); - eventHolder[0].addEventListener('pandrag', pan.drag, false); - eventHolder[0].addEventListener('panend', pan.end, false); - eventHolder[0].addEventListener('pinchstart', pinch.start, false); - eventHolder[0].addEventListener('pinchdrag', pinch.drag, false); - eventHolder[0].addEventListener('pinchend', pinch.end, false); - eventHolder[0].addEventListener('doubletap', doubleTap.recenterPlot, false); - } - } - - function shutdown(plot, eventHolder) { - eventHolder[0].removeEventListener('panstart', pan.start); - eventHolder[0].removeEventListener('pandrag', pan.drag); - eventHolder[0].removeEventListener('panend', pan.end); - eventHolder[0].removeEventListener('pinchstart', pinch.start); - eventHolder[0].removeEventListener('pinchdrag', pinch.drag); - eventHolder[0].removeEventListener('pinchend', pinch.end); - eventHolder[0].removeEventListener('doubletap', doubleTap.recenterPlot); - 
} - - pan = { - start: function(e) { - presetNavigationState(e, 'pan', gestureState); - updateData(e, 'pan', gestureState, navigationState); - - if (useSmartPan) { - var point = getPoint(e, 'pan'); - navigationState.initialState = plot.navigationState(point.x, point.y); - } - }, - - drag: function(e) { - presetNavigationState(e, 'pan', gestureState); - - if (useSmartPan) { - var point = getPoint(e, 'pan'); - plot.smartPan({ - x: navigationState.initialState.startPageX - point.x, - y: navigationState.initialState.startPageY - point.y - }, navigationState.initialState, navigationState.touchedAxis, false, smartPanLock); - } else if (useManualPan) { - plot.pan({ - left: -delta(e, 'pan', gestureState).x, - top: -delta(e, 'pan', gestureState).y, - axes: navigationState.touchedAxis - }); - updatePrevPanPosition(e, 'pan', gestureState, navigationState); - } - }, - - end: function(e) { - presetNavigationState(e, 'pan', gestureState); - - if (useSmartPan) { - plot.smartPan.end(); - } - - if (wasPinchEvent(e, gestureState)) { - updatePrevPanPosition(e, 'pan', gestureState, navigationState); - } - } - }; - - var pinchDragTimeout; - pinch = { - start: function(e) { - if (pinchDragTimeout) { - clearTimeout(pinchDragTimeout); - pinchDragTimeout = null; - } - presetNavigationState(e, 'pinch', gestureState); - setPrevDistance(e, gestureState); - updateData(e, 'pinch', gestureState, navigationState); - }, - - drag: function(e) { - if (pinchDragTimeout) { - return; - } - pinchDragTimeout = setTimeout(function() { - presetNavigationState(e, 'pinch', gestureState); - plot.pan({ - left: -delta(e, 'pinch', gestureState).x, - top: -delta(e, 'pinch', gestureState).y, - axes: navigationState.touchedAxis - }); - updatePrevPanPosition(e, 'pinch', gestureState, navigationState); - - var dist = pinchDistance(e); - - if (gestureState.zoomEnable || Math.abs(dist - gestureState.prevDistance) > ZOOM_DISTANCE_MARGIN) { - zoomPlot(plot, e, gestureState, navigationState); - - //activate zoom mode - gestureState.zoomEnable = true; - } - pinchDragTimeout = null; - }, 1000 / 60); - }, - - end: function(e) { - if (pinchDragTimeout) { - clearTimeout(pinchDragTimeout); - pinchDragTimeout = null; - } - presetNavigationState(e, 'pinch', gestureState); - gestureState.prevDistance = null; - } - }; - - doubleTap = { - recenterPlot: function(e) { - if (e && e.detail && e.detail.type === 'touchmove') { - // do not recenter during touch moving; - return; - } - recenterPlotOnDoubleTap(plot, e, gestureState, navigationState); - } - }; - - if (options.pan.enableTouch === true) { - plot.hooks.bindEvents.push(bindEvents); - plot.hooks.shutdown.push(shutdown); - } - - function presetNavigationState(e, gesture, gestureState) { - navigationState.touchedAxis = getAxis(plot, e, gesture, navigationState); - if (noAxisTouched(navigationState)) { - navigationState.navigationConstraint = 'unconstrained'; - } else { - navigationState.navigationConstraint = 'axisConstrained'; - } - } - } - - $.plot.plugins.push({ - init: init, - options: options, - name: 'navigateTouch', - version: '0.3' - }); - - function recenterPlotOnDoubleTap(plot, e, gestureState, navigationState) { - checkAxesForDoubleTap(plot, e, navigationState); - if ((navigationState.currentTouchedAxis === 'x' && navigationState.prevTouchedAxis === 'x') || - (navigationState.currentTouchedAxis === 'y' && navigationState.prevTouchedAxis === 'y') || - (navigationState.currentTouchedAxis === 'none' && navigationState.prevTouchedAxis === 'none')) { - plot.recenter({ axes: navigationState.touchedAxis
}); - } - } - - function checkAxesForDoubleTap(plot, e, navigationState) { - var axis = plot.getTouchedAxis(e.detail.firstTouch.x, e.detail.firstTouch.y); - if (axis[0] !== undefined) { - navigationState.prevTouchedAxis = axis[0].direction; - } - - axis = plot.getTouchedAxis(e.detail.secondTouch.x, e.detail.secondTouch.y); - if (axis[0] !== undefined) { - navigationState.touchedAxis = axis; - navigationState.currentTouchedAxis = axis[0].direction; - } - - if (noAxisTouched(navigationState)) { - navigationState.touchedAxis = null; - navigationState.prevTouchedAxis = 'none'; - navigationState.currentTouchedAxis = 'none'; - } - } - - function zoomPlot(plot, e, gestureState, navigationState) { - var offset = plot.offset(), - center = { - left: 0, - top: 0 - }, - zoomAmount = pinchDistance(e) / gestureState.prevDistance, - dist = pinchDistance(e); - - center.left = getPoint(e, 'pinch').x - offset.left; - center.top = getPoint(e, 'pinch').y - offset.top; - - // send the computed touched axis to the zoom function so that it only zooms on that one - plot.zoom({ - center: center, - amount: zoomAmount, - axes: navigationState.touchedAxis - }); - gestureState.prevDistance = dist; - } - - function wasPinchEvent(e, gestureState) { - return (gestureState.zoomEnable && e.detail.touches.length === 1); - } - - function getAxis(plot, e, gesture, navigationState) { - if (e.type === 'pinchstart') { - var axisTouch1 = plot.getTouchedAxis(e.detail.touches[0].pageX, e.detail.touches[0].pageY); - var axisTouch2 = plot.getTouchedAxis(e.detail.touches[1].pageX, e.detail.touches[1].pageY); - - if (axisTouch1.length === axisTouch2.length && axisTouch1.toString() === axisTouch2.toString()) { - return axisTouch1; - } - } else if (e.type === 'panstart') { - return plot.getTouchedAxis(e.detail.touches[0].pageX, e.detail.touches[0].pageY); - } else if (e.type === 'pinchend') { - //update axis since instead on pinch, a pan event is made - return plot.getTouchedAxis(e.detail.touches[0].pageX, e.detail.touches[0].pageY); - } else { - return navigationState.touchedAxis; - } - } - - function noAxisTouched(navigationState) { - return (!navigationState.touchedAxis || navigationState.touchedAxis.length === 0); - } - - function setPrevDistance(e, gestureState) { - gestureState.prevDistance = pinchDistance(e); - } - - function updateData(e, gesture, gestureState, navigationState) { - var axisDir, - point = getPoint(e, gesture); - - switch (navigationState.navigationConstraint) { - case 'unconstrained': - navigationState.touchedAxis = null; - gestureState.prevTapPosition = { - x: gestureState.prevPanPosition.x, - y: gestureState.prevPanPosition.y - }; - gestureState.prevPanPosition = { - x: point.x, - y: point.y - }; - break; - case 'axisConstrained': - axisDir = navigationState.touchedAxis[0].direction; - navigationState.currentTouchedAxis = axisDir; - gestureState.prevTapPosition[axisDir] = gestureState.prevPanPosition[axisDir]; - gestureState.prevPanPosition[axisDir] = point[axisDir]; - break; - default: - break; - } - } - - function distance(x1, y1, x2, y2) { - return Math.sqrt((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2)); - } - - function pinchDistance(e) { - var t1 = e.detail.touches[0], - t2 = e.detail.touches[1]; - return distance(t1.pageX, t1.pageY, t2.pageX, t2.pageY); - } - - function updatePrevPanPosition(e, gesture, gestureState, navigationState) { - var point = getPoint(e, gesture); - - switch (navigationState.navigationConstraint) { - case 'unconstrained': - gestureState.prevPanPosition.x = point.x; - 
gestureState.prevPanPosition.y = point.y; - break; - case 'axisConstrained': - gestureState.prevPanPosition[navigationState.currentTouchedAxis] = - point[navigationState.currentTouchedAxis]; - break; - default: - break; - } - } - - function delta(e, gesture, gestureState) { - var point = getPoint(e, gesture); - - return { - x: point.x - gestureState.prevPanPosition.x, - y: point.y - gestureState.prevPanPosition.y - } - } - - function getPoint(e, gesture) { - if (gesture === 'pinch') { - return { - x: (e.detail.touches[0].pageX + e.detail.touches[1].pageX) / 2, - y: (e.detail.touches[0].pageY + e.detail.touches[1].pageY) / 2 - } - } else { - return { - x: e.detail.touches[0].pageX, - y: e.detail.touches[0].pageY - } - } - } -})(jQuery); diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.uiConstants.js b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.uiConstants.js deleted file mode 100644 index 627847d..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.flot.uiConstants.js +++ /dev/null @@ -1,10 +0,0 @@ -(function ($) { - 'use strict'; - $.plot.uiConstants = { - SNAPPING_CONSTANT: 20, - PANHINT_LENGTH_CONSTANT: 10, - MINOR_TICKS_COUNT_CONSTANT: 4, - TICK_LENGTH_CONSTANT: 10, - ZOOM_DISTANCE_MARGIN: 25 - }; -})(jQuery); diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.js b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.js deleted file mode 100644 index f9f969a..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.js +++ /dev/null @@ -1,9473 +0,0 @@ -/*! - * jQuery JavaScript Library v1.8.3 - * http://jquery.com/ - * - * Includes Sizzle.js - * http://sizzlejs.com/ - * - * Copyright 2012 jQuery Foundation and other contributors - * Released under the MIT license - * http://jquery.org/license - * - * Date: Tue Nov 13 2012 08:20:33 GMT-0500 (Eastern Standard Time) - */ -/* eslint-disable */ -(function( window, undefined ) { -var - // A central reference to the root jQuery(document) - rootjQuery, - - // The deferred used on DOM ready - readyList, - - // Use the correct document accordingly with window argument (sandbox) - document = window.document, - location = window.location, - navigator = window.navigator, - - // Map over jQuery in case of overwrite - _jQuery = window.jQuery, - - // Map over the $ in case of overwrite - _$ = window.$, - - // Save a reference to some core methods - core_push = Array.prototype.push, - core_slice = Array.prototype.slice, - core_indexOf = Array.prototype.indexOf, - core_toString = Object.prototype.toString, - core_hasOwn = Object.prototype.hasOwnProperty, - core_trim = String.prototype.trim, - - // Define a local copy of jQuery - jQuery = function( selector, context ) { - // The jQuery object is actually just the init constructor 'enhanced' - return new jQuery.fn.init( selector, context, rootjQuery ); - }, - - // Used for matching numbers - core_pnum = /[\-+]?(?:\d*\.|)\d+(?:[eE][\-+]?\d+|)/.source, - - // Used for detecting and trimming whitespace - core_rnotwhite = /\S/, - core_rspace = /\s+/, - - // Make sure we trim BOM and NBSP (here's looking at you, Safari 5.0 and IE) - rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g, - - // A simple way to check for HTML strings - // Prioritize #id over <tag> to avoid XSS via location.hash (#9521) - rquickExpr = /^(?:[^#<]*(<[\w\W]+>)[^>]*$|#([\w\-]*)$)/, - - // Match a standalone tag - rsingleTag = /^<(\w+)\s*\/?>(?:<\/\1>|)$/, - - // JSON
RegExp - rvalidchars = /^[\],:{}\s]*$/, - rvalidbraces = /(?:^|:|,)(?:\s*\[)+/g, - rvalidescape = /\\(?:["\\\/bfnrt]|u[\da-fA-F]{4})/g, - rvalidtokens = /"[^"\\\r\n]*"|true|false|null|-?(?:\d\d*\.|)\d+(?:[eE][\-+]?\d+|)/g, - - // Matches dashed string for camelizing - rmsPrefix = /^-ms-/, - rdashAlpha = /-([\da-z])/gi, - - // Used by jQuery.camelCase as callback to replace() - fcamelCase = function( all, letter ) { - return ( letter + "" ).toUpperCase(); - }, - - // The ready event handler and self cleanup method - DOMContentLoaded = function() { - if ( document.addEventListener ) { - document.removeEventListener( "DOMContentLoaded", DOMContentLoaded, false ); - jQuery.ready(); - } else if ( document.readyState === "complete" ) { - // we're here because readyState === "complete" in oldIE - // which is good enough for us to call the dom ready! - document.detachEvent( "onreadystatechange", DOMContentLoaded ); - jQuery.ready(); - } - }, - - // [[Class]] -> type pairs - class2type = {}; - -jQuery.fn = jQuery.prototype = { - constructor: jQuery, - init: function( selector, context, rootjQuery ) { - var match, elem, ret, doc; - - // Handle $(""), $(null), $(undefined), $(false) - if ( !selector ) { - return this; - } - - // Handle $(DOMElement) - if ( selector.nodeType ) { - this.context = this[0] = selector; - this.length = 1; - return this; - } - - // Handle HTML strings - if ( typeof selector === "string" ) { - if ( selector.charAt(0) === "<" && selector.charAt( selector.length - 1 ) === ">" && selector.length >= 3 ) { - // Assume that strings that start and end with <> are HTML and skip the regex check - match = [ null, selector, null ]; - - } else { - match = rquickExpr.exec( selector ); - } - - // Match html or make sure no context is specified for #id - if ( match && (match[1] || !context) ) { - - // HANDLE: $(html) -> $(array) - if ( match[1] ) { - context = context instanceof jQuery ? context[0] : context; - doc = ( context && context.nodeType ? 
context.ownerDocument || context : document ); - - // scripts is true for back-compat - selector = jQuery.parseHTML( match[1], doc, true ); - if ( rsingleTag.test( match[1] ) && jQuery.isPlainObject( context ) ) { - this.attr.call( selector, context, true ); - } - - return jQuery.merge( this, selector ); - - // HANDLE: $(#id) - } else { - elem = document.getElementById( match[2] ); - - // Check parentNode to catch when Blackberry 4.6 returns - // nodes that are no longer in the document #6963 - if ( elem && elem.parentNode ) { - // Handle the case where IE and Opera return items - // by name instead of ID - if ( elem.id !== match[2] ) { - return rootjQuery.find( selector ); - } - - // Otherwise, we inject the element directly into the jQuery object - this.length = 1; - this[0] = elem; - } - - this.context = document; - this.selector = selector; - return this; - } - - // HANDLE: $(expr, $(...)) - } else if ( !context || context.jquery ) { - return ( context || rootjQuery ).find( selector ); - - // HANDLE: $(expr, context) - // (which is just equivalent to: $(context).find(expr) - } else { - return this.constructor( context ).find( selector ); - } - - // HANDLE: $(function) - // Shortcut for document ready - } else if ( jQuery.isFunction( selector ) ) { - return rootjQuery.ready( selector ); - } - - if ( selector.selector !== undefined ) { - this.selector = selector.selector; - this.context = selector.context; - } - - return jQuery.makeArray( selector, this ); - }, - - // Start with an empty selector - selector: "", - - // The current version of jQuery being used - jquery: "1.8.3", - - // The default length of a jQuery object is 0 - length: 0, - - // The number of elements contained in the matched element set - size: function() { - return this.length; - }, - - toArray: function() { - return core_slice.call( this ); - }, - - // Get the Nth element in the matched element set OR - // Get the whole matched element set as a clean array - get: function( num ) { - return num == null ? - - // Return a 'clean' array - this.toArray() : - - // Return just the object - ( num < 0 ? this[ this.length + num ] : this[ num ] ); - }, - - // Take an array of elements and push it onto the stack - // (returning the new matched element set) - pushStack: function( elems, name, selector ) { - - // Build a new jQuery matched element set - var ret = jQuery.merge( this.constructor(), elems ); - - // Add the old object onto the stack (as a reference) - ret.prevObject = this; - - ret.context = this.context; - - if ( name === "find" ) { - ret.selector = this.selector + ( this.selector ? " " : "" ) + selector; - } else if ( name ) { - ret.selector = this.selector + "." + name + "(" + selector + ")"; - } - - // Return the newly-formed element set - return ret; - }, - - // Execute a callback for every element in the matched set. - // (You can seed the arguments with an array of args, but this is - // only used internally.) - each: function( callback, args ) { - return jQuery.each( this, callback, args ); - }, - - ready: function( fn ) { - // Add the callback - jQuery.ready.promise().done( fn ); - - return this; - }, - - eq: function( i ) { - i = +i; - return i === -1 ? 
- this.slice( i ) : - this.slice( i, i + 1 ); - }, - - first: function() { - return this.eq( 0 ); - }, - - last: function() { - return this.eq( -1 ); - }, - - slice: function() { - return this.pushStack( core_slice.apply( this, arguments ), - "slice", core_slice.call(arguments).join(",") ); - }, - - map: function( callback ) { - return this.pushStack( jQuery.map(this, function( elem, i ) { - return callback.call( elem, i, elem ); - })); - }, - - end: function() { - return this.prevObject || this.constructor(null); - }, - - // For internal use only. - // Behaves like an Array's method, not like a jQuery method. - push: core_push, - sort: [].sort, - splice: [].splice -}; - -// Give the init function the jQuery prototype for later instantiation -jQuery.fn.init.prototype = jQuery.fn; - -jQuery.extend = jQuery.fn.extend = function() { - var options, name, src, copy, copyIsArray, clone, - target = arguments[0] || {}, - i = 1, - length = arguments.length, - deep = false; - - // Handle a deep copy situation - if ( typeof target === "boolean" ) { - deep = target; - target = arguments[1] || {}; - // skip the boolean and the target - i = 2; - } - - // Handle case when target is a string or something (possible in deep copy) - if ( typeof target !== "object" && !jQuery.isFunction(target) ) { - target = {}; - } - - // extend jQuery itself if only one argument is passed - if ( length === i ) { - target = this; - --i; - } - - for ( ; i < length; i++ ) { - // Only deal with non-null/undefined values - if ( (options = arguments[ i ]) != null ) { - // Extend the base object - for ( name in options ) { - src = target[ name ]; - copy = options[ name ]; - - // Prevent never-ending loop - if ( target === copy ) { - continue; - } - - // Recurse if we're merging plain objects or arrays - if ( deep && copy && ( jQuery.isPlainObject(copy) || (copyIsArray = jQuery.isArray(copy)) ) ) { - if ( copyIsArray ) { - copyIsArray = false; - clone = src && jQuery.isArray(src) ? src : []; - - } else { - clone = src && jQuery.isPlainObject(src) ? src : {}; - } - - // Never move original objects, clone them - target[ name ] = jQuery.extend( deep, clone, copy ); - - // Don't bring in undefined values - } else if ( copy !== undefined ) { - target[ name ] = copy; - } - } - } - } - - // Return the modified object - return target; -}; - -jQuery.extend({ - noConflict: function( deep ) { - if ( window.$ === jQuery ) { - window.$ = _$; - } - - if ( deep && window.jQuery === jQuery ) { - window.jQuery = _jQuery; - } - - return jQuery; - }, - - // Is the DOM ready to be used? Set to true once it occurs. - isReady: false, - - // A counter to track how many items to wait for before - // the ready event fires. See #6781 - readyWait: 1, - - // Hold (or release) the ready event - holdReady: function( hold ) { - if ( hold ) { - jQuery.readyWait++; - } else { - jQuery.ready( true ); - } - }, - - // Handle when the DOM is ready - ready: function( wait ) { - - // Abort if there are pending holds or we're already ready - if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { - return; - } - - // Make sure body exists, at least, in case IE gets a little overzealous (ticket #5443). 
- if ( !document.body ) { - return setTimeout( jQuery.ready, 1 ); - } - - // Remember that the DOM is ready - jQuery.isReady = true; - - // If a normal DOM Ready event fired, decrement, and wait if need be - if ( wait !== true && --jQuery.readyWait > 0 ) { - return; - } - - // If there are functions bound, to execute - readyList.resolveWith( document, [ jQuery ] ); - - // Trigger any bound ready events - if ( jQuery.fn.trigger ) { - jQuery( document ).trigger("ready").off("ready"); - } - }, - - // See test/unit/core.js for details concerning isFunction. - // Since version 1.3, DOM methods and functions like alert - // aren't supported. They return false on IE (#2968). - isFunction: function( obj ) { - return jQuery.type(obj) === "function"; - }, - - isArray: Array.isArray || function( obj ) { - return jQuery.type(obj) === "array"; - }, - - isWindow: function( obj ) { - return obj != null && obj == obj.window; - }, - - isNumeric: function( obj ) { - return !isNaN( parseFloat(obj) ) && isFinite( obj ); - }, - - type: function( obj ) { - return obj == null ? - String( obj ) : - class2type[ core_toString.call(obj) ] || "object"; - }, - - isPlainObject: function( obj ) { - // Must be an Object. - // Because of IE, we also have to check the presence of the constructor property. - // Make sure that DOM nodes and window objects don't pass through, as well - if ( !obj || jQuery.type(obj) !== "object" || obj.nodeType || jQuery.isWindow( obj ) ) { - return false; - } - - try { - // Not own constructor property must be Object - if ( obj.constructor && - !core_hasOwn.call(obj, "constructor") && - !core_hasOwn.call(obj.constructor.prototype, "isPrototypeOf") ) { - return false; - } - } catch ( e ) { - // IE8,9 Will throw exceptions on certain host objects #9897 - return false; - } - - // Own properties are enumerated firstly, so to speed up, - // if last one is own, then all properties are own. - - var key; - for ( key in obj ) {} - - return key === undefined || core_hasOwn.call( obj, key ); - }, - - isEmptyObject: function( obj ) { - var name; - for ( name in obj ) { - return false; - } - return true; - }, - - error: function( msg ) { - throw new Error( msg ); - }, - - // data: string of html - // context (optional): If specified, the fragment will be created in this context, defaults to document - // scripts (optional): If true, will include scripts passed in the html string - parseHTML: function( data, context, scripts ) { - var parsed; - if ( !data || typeof data !== "string" ) { - return null; - } - if ( typeof context === "boolean" ) { - scripts = context; - context = 0; - } - context = context || document; - - // Single tag - if ( (parsed = rsingleTag.exec( data )) ) { - return [ context.createElement( parsed[1] ) ]; - } - - parsed = jQuery.buildFragment( [ data ], context, scripts ? null : [] ); - return jQuery.merge( [], - (parsed.cacheable ? 
jQuery.clone( parsed.fragment ) : parsed.fragment).childNodes ); - }, - - parseJSON: function( data ) { - if ( !data || typeof data !== "string") { - return null; - } - - // Make sure leading/trailing whitespace is removed (IE can't handle it) - data = jQuery.trim( data ); - - // Attempt to parse using the native JSON parser first - if ( window.JSON && window.JSON.parse ) { - return window.JSON.parse( data ); - } - - // Make sure the incoming data is actual JSON - // Logic borrowed from http://json.org/json2.js - if ( rvalidchars.test( data.replace( rvalidescape, "@" ) - .replace( rvalidtokens, "]" ) - .replace( rvalidbraces, "")) ) { - - return ( new Function( "return " + data ) )(); - - } - jQuery.error( "Invalid JSON: " + data ); - }, - - // Cross-browser xml parsing - parseXML: function( data ) { - var xml, tmp; - if ( !data || typeof data !== "string" ) { - return null; - } - try { - if ( window.DOMParser ) { // Standard - tmp = new DOMParser(); - xml = tmp.parseFromString( data , "text/xml" ); - } else { // IE - xml = new ActiveXObject( "Microsoft.XMLDOM" ); - xml.async = "false"; - xml.loadXML( data ); - } - } catch( e ) { - xml = undefined; - } - if ( !xml || !xml.documentElement || xml.getElementsByTagName( "parsererror" ).length ) { - jQuery.error( "Invalid XML: " + data ); - } - return xml; - }, - - noop: function() {}, - - // Evaluates a script in a global context - // Workarounds based on findings by Jim Driscoll - // http://weblogs.java.net/blog/driscoll/archive/2009/09/08/eval-javascript-global-context - globalEval: function( data ) { - if ( data && core_rnotwhite.test( data ) ) { - // We use execScript on Internet Explorer - // We use an anonymous function so that context is window - // rather than jQuery in Firefox - ( window.execScript || function( data ) { - window[ "eval" ].call( window, data ); - } )( data ); - } - }, - - // Convert dashed to camelCase; used by the css and data modules - // Microsoft forgot to hump their vendor prefix (#9572) - camelCase: function( string ) { - return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); - }, - - nodeName: function( elem, name ) { - return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); - }, - - // args is for internal usage only - each: function( obj, callback, args ) { - var name, - i = 0, - length = obj.length, - isObj = length === undefined || jQuery.isFunction( obj ); - - if ( args ) { - if ( isObj ) { - for ( name in obj ) { - if ( callback.apply( obj[ name ], args ) === false ) { - break; - } - } - } else { - for ( ; i < length; ) { - if ( callback.apply( obj[ i++ ], args ) === false ) { - break; - } - } - } - - // A special, fast, case for the most common use of each - } else { - if ( isObj ) { - for ( name in obj ) { - if ( callback.call( obj[ name ], name, obj[ name ] ) === false ) { - break; - } - } - } else { - for ( ; i < length; ) { - if ( callback.call( obj[ i ], i, obj[ i++ ] ) === false ) { - break; - } - } - } - } - - return obj; - }, - - // Use native String.trim function wherever possible - trim: core_trim && !core_trim.call("\uFEFF\xA0") ? - function( text ) { - return text == null ? - "" : - core_trim.call( text ); - } : - - // Otherwise use our own trimming functionality - function( text ) { - return text == null ? 
- "" : - ( text + "" ).replace( rtrim, "" ); - }, - - // results is for internal usage only - makeArray: function( arr, results ) { - var type, - ret = results || []; - - if ( arr != null ) { - // The window, strings (and functions) also have 'length' - // Tweaked logic slightly to handle Blackberry 4.7 RegExp issues #6930 - type = jQuery.type( arr ); - - if ( arr.length == null || type === "string" || type === "function" || type === "regexp" || jQuery.isWindow( arr ) ) { - core_push.call( ret, arr ); - } else { - jQuery.merge( ret, arr ); - } - } - - return ret; - }, - - inArray: function( elem, arr, i ) { - var len; - - if ( arr ) { - if ( core_indexOf ) { - return core_indexOf.call( arr, elem, i ); - } - - len = arr.length; - i = i ? i < 0 ? Math.max( 0, len + i ) : i : 0; - - for ( ; i < len; i++ ) { - // Skip accessing in sparse arrays - if ( i in arr && arr[ i ] === elem ) { - return i; - } - } - } - - return -1; - }, - - merge: function( first, second ) { - var l = second.length, - i = first.length, - j = 0; - - if ( typeof l === "number" ) { - for ( ; j < l; j++ ) { - first[ i++ ] = second[ j ]; - } - - } else { - while ( second[j] !== undefined ) { - first[ i++ ] = second[ j++ ]; - } - } - - first.length = i; - - return first; - }, - - grep: function( elems, callback, inv ) { - var retVal, - ret = [], - i = 0, - length = elems.length; - inv = !!inv; - - // Go through the array, only saving the items - // that pass the validator function - for ( ; i < length; i++ ) { - retVal = !!callback( elems[ i ], i ); - if ( inv !== retVal ) { - ret.push( elems[ i ] ); - } - } - - return ret; - }, - - // arg is for internal usage only - map: function( elems, callback, arg ) { - var value, key, - ret = [], - i = 0, - length = elems.length, - // jquery objects are treated as arrays - isArray = elems instanceof jQuery || length !== undefined && typeof length === "number" && ( ( length > 0 && elems[ 0 ] && elems[ length -1 ] ) || length === 0 || jQuery.isArray( elems ) ) ; - - // Go through the array, translating each of the items to their - if ( isArray ) { - for ( ; i < length; i++ ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret[ ret.length ] = value; - } - } - - // Go through every key on the object, - } else { - for ( key in elems ) { - value = callback( elems[ key ], key, arg ); - - if ( value != null ) { - ret[ ret.length ] = value; - } - } - } - - // Flatten any nested arrays - return ret.concat.apply( [], ret ); - }, - - // A global GUID counter for objects - guid: 1, - - // Bind a function to a context, optionally partially applying any - // arguments. - proxy: function( fn, context ) { - var tmp, args, proxy; - - if ( typeof context === "string" ) { - tmp = fn[ context ]; - context = fn; - fn = tmp; - } - - // Quick check to determine if target is callable, in the spec - // this throws a TypeError, but we will just return undefined. 
- if ( !jQuery.isFunction( fn ) ) { - return undefined; - } - - // Simulated bind - args = core_slice.call( arguments, 2 ); - proxy = function() { - return fn.apply( context, args.concat( core_slice.call( arguments ) ) ); - }; - - // Set the guid of unique handler to the same of original handler, so it can be removed - proxy.guid = fn.guid = fn.guid || jQuery.guid++; - - return proxy; - }, - - // Multifunctional method to get and set values of a collection - // The value/s can optionally be executed if it's a function - access: function( elems, fn, key, value, chainable, emptyGet, pass ) { - var exec, - bulk = key == null, - i = 0, - length = elems.length; - - // Sets many values - if ( key && typeof key === "object" ) { - for ( i in key ) { - jQuery.access( elems, fn, i, key[i], 1, emptyGet, value ); - } - chainable = 1; - - // Sets one value - } else if ( value !== undefined ) { - // Optionally, function values get executed if exec is true - exec = pass === undefined && jQuery.isFunction( value ); - - if ( bulk ) { - // Bulk operations only iterate when executing function values - if ( exec ) { - exec = fn; - fn = function( elem, key, value ) { - return exec.call( jQuery( elem ), value ); - }; - - // Otherwise they run against the entire set - } else { - fn.call( elems, value ); - fn = null; - } - } - - if ( fn ) { - for (; i < length; i++ ) { - fn( elems[i], key, exec ? value.call( elems[i], i, fn( elems[i], key ) ) : value, pass ); - } - } - - chainable = 1; - } - - return chainable ? - elems : - - // Gets - bulk ? - fn.call( elems ) : - length ? fn( elems[0], key ) : emptyGet; - }, - - now: function() { - return ( new Date() ).getTime(); - } -}); - -jQuery.ready.promise = function( obj ) { - if ( !readyList ) { - - readyList = jQuery.Deferred(); - - // Catch cases where $(document).ready() is called after the browser event has already occurred. 
- // we once tried to use readyState "interactive" here, but it caused issues like the one - // discovered by ChrisS here: http://bugs.jquery.com/ticket/12282#comment:15 - if ( document.readyState === "complete" ) { - // Handle it asynchronously to allow scripts the opportunity to delay ready - setTimeout( jQuery.ready, 1 ); - - // Standards-based browsers support DOMContentLoaded - } else if ( document.addEventListener ) { - // Use the handy event callback - document.addEventListener( "DOMContentLoaded", DOMContentLoaded, false ); - - // A fallback to window.onload, that will always work - window.addEventListener( "load", jQuery.ready, false ); - - // If IE event model is used - } else { - // Ensure firing before onload, maybe late but safe also for iframes - document.attachEvent( "onreadystatechange", DOMContentLoaded ); - - // A fallback to window.onload, that will always work - window.attachEvent( "onload", jQuery.ready ); - - // If IE and not a frame - // continually check to see if the document is ready - var top = false; - - try { - top = window.frameElement == null && document.documentElement; - } catch(e) {} - - if ( top && top.doScroll ) { - (function doScrollCheck() { - if ( !jQuery.isReady ) { - - try { - // Use the trick by Diego Perini - // http://javascript.nwbox.com/IEContentLoaded/ - top.doScroll("left"); - } catch(e) { - return setTimeout( doScrollCheck, 50 ); - } - - // and execute any waiting functions - jQuery.ready(); - } - })(); - } - } - } - return readyList.promise( obj ); -}; - -// Populate the class2type map -jQuery.each("Boolean Number String Function Array Date RegExp Object".split(" "), function(i, name) { - class2type[ "[object " + name + "]" ] = name.toLowerCase(); -}); - -// All jQuery objects should point back to these -rootjQuery = jQuery(document); -// String to Object options format cache -var optionsCache = {}; - -// Convert String-formatted options into Object-formatted ones and store in cache -function createOptions( options ) { - var object = optionsCache[ options ] = {}; - jQuery.each( options.split( core_rspace ), function( _, flag ) { - object[ flag ] = true; - }); - return object; -} - -/* - * Create a callback list using the following parameters: - * - * options: an optional list of space-separated options that will change how - * the callback list behaves or a more traditional option object - * - * By default a callback list will act like an event callback list and can be - * "fired" multiple times. - * - * Possible options: - * - * once: will ensure the callback list can only be fired once (like a Deferred) - * - * memory: will keep track of previous values and will call any callback added - * after the list has been fired right away with the latest "memorized" - * values (like a Deferred) - * - * unique: will ensure a callback can only be added once (no duplicate in the list) - * - * stopOnFalse: interrupt callings when a callback returns false - * - */ -jQuery.Callbacks = function( options ) { - - // Convert options from String-formatted to Object-formatted if needed - // (we check in cache first) - options = typeof options === "string" ? 
- ( optionsCache[ options ] || createOptions( options ) ) : - jQuery.extend( {}, options ); - - var // Last fire value (for non-forgettable lists) - memory, - // Flag to know if list was already fired - fired, - // Flag to know if list is currently firing - firing, - // First callback to fire (used internally by add and fireWith) - firingStart, - // End of the loop when firing - firingLength, - // Index of currently firing callback (modified by remove if needed) - firingIndex, - // Actual callback list - list = [], - // Stack of fire calls for repeatable lists - stack = !options.once && [], - // Fire callbacks - fire = function( data ) { - memory = options.memory && data; - fired = true; - firingIndex = firingStart || 0; - firingStart = 0; - firingLength = list.length; - firing = true; - for ( ; list && firingIndex < firingLength; firingIndex++ ) { - if ( list[ firingIndex ].apply( data[ 0 ], data[ 1 ] ) === false && options.stopOnFalse ) { - memory = false; // To prevent further calls using add - break; - } - } - firing = false; - if ( list ) { - if ( stack ) { - if ( stack.length ) { - fire( stack.shift() ); - } - } else if ( memory ) { - list = []; - } else { - self.disable(); - } - } - }, - // Actual Callbacks object - self = { - // Add a callback or a collection of callbacks to the list - add: function() { - if ( list ) { - // First, we save the current length - var start = list.length; - (function add( args ) { - jQuery.each( args, function( _, arg ) { - var type = jQuery.type( arg ); - if ( type === "function" ) { - if ( !options.unique || !self.has( arg ) ) { - list.push( arg ); - } - } else if ( arg && arg.length && type !== "string" ) { - // Inspect recursively - add( arg ); - } - }); - })( arguments ); - // Do we need to add the callbacks to the - // current firing batch? - if ( firing ) { - firingLength = list.length; - // With memory, if we're not firing then - // we should call right away - } else if ( memory ) { - firingStart = start; - fire( memory ); - } - } - return this; - }, - // Remove a callback from the list - remove: function() { - if ( list ) { - jQuery.each( arguments, function( _, arg ) { - var index; - while( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { - list.splice( index, 1 ); - // Handle firing indexes - if ( firing ) { - if ( index <= firingLength ) { - firingLength--; - } - if ( index <= firingIndex ) { - firingIndex--; - } - } - } - }); - } - return this; - }, - // Control if a given callback is in the list - has: function( fn ) { - return jQuery.inArray( fn, list ) > -1; - }, - // Remove all callbacks from the list - empty: function() { - list = []; - return this; - }, - // Have the list do nothing anymore - disable: function() { - list = stack = memory = undefined; - return this; - }, - // Is it disabled? - disabled: function() { - return !list; - }, - // Lock the list in its current state - lock: function() { - stack = undefined; - if ( !memory ) { - self.disable(); - } - return this; - }, - // Is it locked? - locked: function() { - return !stack; - }, - // Call all callbacks with the given context and arguments - fireWith: function( context, args ) { - args = args || []; - args = [ context, args.slice ? 
args.slice() : args ]; - if ( list && ( !fired || stack ) ) { - if ( firing ) { - stack.push( args ); - } else { - fire( args ); - } - } - return this; - }, - // Call all the callbacks with the given arguments - fire: function() { - self.fireWith( this, arguments ); - return this; - }, - // To know if the callbacks have already been called at least once - fired: function() { - return !!fired; - } - }; - - return self; -}; -jQuery.extend({ - - Deferred: function( func ) { - var tuples = [ - // action, add listener, listener list, final state - [ "resolve", "done", jQuery.Callbacks("once memory"), "resolved" ], - [ "reject", "fail", jQuery.Callbacks("once memory"), "rejected" ], - [ "notify", "progress", jQuery.Callbacks("memory") ] - ], - state = "pending", - promise = { - state: function() { - return state; - }, - always: function() { - deferred.done( arguments ).fail( arguments ); - return this; - }, - then: function( /* fnDone, fnFail, fnProgress */ ) { - var fns = arguments; - return jQuery.Deferred(function( newDefer ) { - jQuery.each( tuples, function( i, tuple ) { - var action = tuple[ 0 ], - fn = fns[ i ]; - // deferred[ done | fail | progress ] for forwarding actions to newDefer - deferred[ tuple[1] ]( jQuery.isFunction( fn ) ? - function() { - var returned = fn.apply( this, arguments ); - if ( returned && jQuery.isFunction( returned.promise ) ) { - returned.promise() - .done( newDefer.resolve ) - .fail( newDefer.reject ) - .progress( newDefer.notify ); - } else { - newDefer[ action + "With" ]( this === deferred ? newDefer : this, [ returned ] ); - } - } : - newDefer[ action ] - ); - }); - fns = null; - }).promise(); - }, - // Get a promise for this deferred - // If obj is provided, the promise aspect is added to the object - promise: function( obj ) { - return obj != null ? jQuery.extend( obj, promise ) : promise; - } - }, - deferred = {}; - - // Keep pipe for back-compat - promise.pipe = promise.then; - - // Add list-specific methods - jQuery.each( tuples, function( i, tuple ) { - var list = tuple[ 2 ], - stateString = tuple[ 3 ]; - - // promise[ done | fail | progress ] = list.add - promise[ tuple[1] ] = list.add; - - // Handle state - if ( stateString ) { - list.add(function() { - // state = [ resolved | rejected ] - state = stateString; - - // [ reject_list | resolve_list ].disable; progress_list.lock - }, tuples[ i ^ 1 ][ 2 ].disable, tuples[ 2 ][ 2 ].lock ); - } - - // deferred[ resolve | reject | notify ] = list.fire - deferred[ tuple[0] ] = list.fire; - deferred[ tuple[0] + "With" ] = list.fireWith; - }); - - // Make the deferred a promise - promise.promise( deferred ); - - // Call given func if any - if ( func ) { - func.call( deferred, deferred ); - } - - // All done! - return deferred; - }, - - // Deferred helper - when: function( subordinate /* , ..., subordinateN */ ) { - var i = 0, - resolveValues = core_slice.call( arguments ), - length = resolveValues.length, - - // the count of uncompleted subordinates - remaining = length !== 1 || ( subordinate && jQuery.isFunction( subordinate.promise ) ) ? length : 0, - - // the master Deferred. If resolveValues consist of only a single Deferred, just use that. - deferred = remaining === 1 ? subordinate : jQuery.Deferred(), - - // Update function for both resolve and progress values - updateFunc = function( i, contexts, values ) { - return function( value ) { - contexts[ i ] = this; - values[ i ] = arguments.length > 1 ? 
core_slice.call( arguments ) : value; - if( values === progressValues ) { - deferred.notifyWith( contexts, values ); - } else if ( !( --remaining ) ) { - deferred.resolveWith( contexts, values ); - } - }; - }, - - progressValues, progressContexts, resolveContexts; - - // add listeners to Deferred subordinates; treat others as resolved - if ( length > 1 ) { - progressValues = new Array( length ); - progressContexts = new Array( length ); - resolveContexts = new Array( length ); - for ( ; i < length; i++ ) { - if ( resolveValues[ i ] && jQuery.isFunction( resolveValues[ i ].promise ) ) { - resolveValues[ i ].promise() - .done( updateFunc( i, resolveContexts, resolveValues ) ) - .fail( deferred.reject ) - .progress( updateFunc( i, progressContexts, progressValues ) ); - } else { - --remaining; - } - } - } - - // if we're not waiting on anything, resolve the master - if ( !remaining ) { - deferred.resolveWith( resolveContexts, resolveValues ); - } - - return deferred.promise(); - } -}); -jQuery.support = (function() { - - var support, - all, - a, - select, - opt, - input, - fragment, - eventName, - i, - isSupported, - clickFn, - div = document.createElement("div"); - - // Setup - div.setAttribute( "className", "t" ); - div.innerHTML = "
a"; - - // Support tests won't run in some limited or non-browser environments - all = div.getElementsByTagName("*"); - a = div.getElementsByTagName("a")[ 0 ]; - if ( !all || !a || !all.length ) { - return {}; - } - - // First batch of tests - select = document.createElement("select"); - opt = select.appendChild( document.createElement("option") ); - input = div.getElementsByTagName("input")[ 0 ]; - - a.style.cssText = "top:1px;float:left;opacity:.5"; - support = { - // IE strips leading whitespace when .innerHTML is used - leadingWhitespace: ( div.firstChild.nodeType === 3 ), - - // Make sure that tbody elements aren't automatically inserted - // IE will insert them into empty tables - tbody: !div.getElementsByTagName("tbody").length, - - // Make sure that link elements get serialized correctly by innerHTML - // This requires a wrapper element in IE - htmlSerialize: !!div.getElementsByTagName("link").length, - - // Get the style information from getAttribute - // (IE uses .cssText instead) - style: /top/.test( a.getAttribute("style") ), - - // Make sure that URLs aren't manipulated - // (IE normalizes it by default) - hrefNormalized: ( a.getAttribute("href") === "/a" ), - - // Make sure that element opacity exists - // (IE uses filter instead) - // Use a regex to work around a WebKit issue. See #5145 - opacity: /^0.5/.test( a.style.opacity ), - - // Verify style float existence - // (IE uses styleFloat instead of cssFloat) - cssFloat: !!a.style.cssFloat, - - // Make sure that if no value is specified for a checkbox - // that it defaults to "on". - // (WebKit defaults to "" instead) - checkOn: ( input.value === "on" ), - - // Make sure that a selected-by-default option has a working selected property. - // (WebKit defaults to false instead of true, IE too, if it's in an optgroup) - optSelected: opt.selected, - - // Test setAttribute on camelCase class. 
If it works, we need attrFixes when doing get/setAttribute (ie6/7) - getSetAttribute: div.className !== "t", - - // Tests for enctype support on a form (#6743) - enctype: !!document.createElement("form").enctype, - - // Makes sure cloning an html5 element does not cause problems - // Where outerHTML is undefined, this still works - html5Clone: document.createElement("nav").cloneNode( true ).outerHTML !== "<:nav>", - - // jQuery.support.boxModel DEPRECATED in 1.8 since we don't support Quirks Mode - boxModel: ( document.compatMode === "CSS1Compat" ), - - // Will be defined later - submitBubbles: true, - changeBubbles: true, - focusinBubbles: false, - deleteExpando: true, - noCloneEvent: true, - inlineBlockNeedsLayout: false, - shrinkWrapBlocks: false, - reliableMarginRight: true, - boxSizingReliable: true, - pixelPosition: false - }; - - // Make sure checked status is properly cloned - input.checked = true; - support.noCloneChecked = input.cloneNode( true ).checked; - - // Make sure that the options inside disabled selects aren't marked as disabled - // (WebKit marks them as disabled) - select.disabled = true; - support.optDisabled = !opt.disabled; - - // Test to see if it's possible to delete an expando from an element - // Fails in Internet Explorer - try { - delete div.test; - } catch( e ) { - support.deleteExpando = false; - } - - if ( !div.addEventListener && div.attachEvent && div.fireEvent ) { - div.attachEvent( "onclick", clickFn = function() { - // Cloning a node shouldn't copy over any - // bound event handlers (IE does this) - support.noCloneEvent = false; - }); - div.cloneNode( true ).fireEvent("onclick"); - div.detachEvent( "onclick", clickFn ); - } - - // Check if a radio maintains its value - // after being appended to the DOM - input = document.createElement("input"); - input.value = "t"; - input.setAttribute( "type", "radio" ); - support.radioValue = input.value === "t"; - - input.setAttribute( "checked", "checked" ); - - // #11217 - WebKit loses check when the name is after the checked attribute - input.setAttribute( "name", "t" ); - - div.appendChild( input ); - fragment = document.createDocumentFragment(); - fragment.appendChild( div.lastChild ); - - // WebKit doesn't clone checked state correctly in fragments - support.checkClone = fragment.cloneNode( true ).cloneNode( true ).lastChild.checked; - - // Check if a disconnected checkbox will retain its checked - // value of true after appended to the DOM (IE6/7) - support.appendChecked = input.checked; - - fragment.removeChild( input ); - fragment.appendChild( div ); - - // Technique from Juriy Zaytsev - // http://perfectionkills.com/detecting-event-support-without-browser-sniffing/ - // We only care about the case where non-standard event systems - // are used, namely in IE. Short-circuiting here helps us to - // avoid an eval call (in setAttribute) which can cause CSP - // to go haywire. 
See: https://developer.mozilla.org/en/Security/CSP - if ( div.attachEvent ) { - for ( i in { - submit: true, - change: true, - focusin: true - }) { - eventName = "on" + i; - isSupported = ( eventName in div ); - if ( !isSupported ) { - div.setAttribute( eventName, "return;" ); - isSupported = ( typeof div[ eventName ] === "function" ); - } - support[ i + "Bubbles" ] = isSupported; - } - } - - // Run tests that need a body at doc ready - jQuery(function() { - var container, div, tds, marginDiv, - divReset = "padding:0;margin:0;border:0;display:block;overflow:hidden;", - body = document.getElementsByTagName("body")[0]; - - if ( !body ) { - // Return for frameset docs that don't have a body - return; - } - - container = document.createElement("div"); - container.style.cssText = "visibility:hidden;border:0;width:0;height:0;position:static;top:0;margin-top:1px"; - body.insertBefore( container, body.firstChild ); - - // Construct the test element - div = document.createElement("div"); - container.appendChild( div ); - - // Check if table cells still have offsetWidth/Height when they are set - // to display:none and there are still other visible table cells in a - // table row; if so, offsetWidth/Height are not reliable for use when - // determining if an element has been hidden directly using - // display:none (it is still safe to use offsets if a parent element is - // hidden; don safety goggles and see bug #4512 for more information). - // (only IE 8 fails this test) - div.innerHTML = "
<table><tr><td></td><td>t</td></tr></table>
"; - tds = div.getElementsByTagName("td"); - tds[ 0 ].style.cssText = "padding:0;margin:0;border:0;display:none"; - isSupported = ( tds[ 0 ].offsetHeight === 0 ); - - tds[ 0 ].style.display = ""; - tds[ 1 ].style.display = "none"; - - // Check if empty table cells still have offsetWidth/Height - // (IE <= 8 fail this test) - support.reliableHiddenOffsets = isSupported && ( tds[ 0 ].offsetHeight === 0 ); - - // Check box-sizing and margin behavior - div.innerHTML = ""; - div.style.cssText = "box-sizing:border-box;-moz-box-sizing:border-box;-webkit-box-sizing:border-box;padding:1px;border:1px;display:block;width:4px;margin-top:1%;position:absolute;top:1%;"; - support.boxSizing = ( div.offsetWidth === 4 ); - support.doesNotIncludeMarginInBodyOffset = ( body.offsetTop !== 1 ); - - // NOTE: To any future maintainer, we've window.getComputedStyle - // because jsdom on node.js will break without it. - if ( window.getComputedStyle ) { - support.pixelPosition = ( window.getComputedStyle( div, null ) || {} ).top !== "1%"; - support.boxSizingReliable = ( window.getComputedStyle( div, null ) || { width: "4px" } ).width === "4px"; - - // Check if div with explicit width and no margin-right incorrectly - // gets computed margin-right based on width of container. For more - // info see bug #3333 - // Fails in WebKit before Feb 2011 nightlies - // WebKit Bug 13343 - getComputedStyle returns wrong value for margin-right - marginDiv = document.createElement("div"); - marginDiv.style.cssText = div.style.cssText = divReset; - marginDiv.style.marginRight = marginDiv.style.width = "0"; - div.style.width = "1px"; - div.appendChild( marginDiv ); - support.reliableMarginRight = - !parseFloat( ( window.getComputedStyle( marginDiv, null ) || {} ).marginRight ); - } - - if ( typeof div.style.zoom !== "undefined" ) { - // Check if natively block-level elements act like inline-block - // elements when setting their display to 'inline' and giving - // them layout - // (IE < 8 does this) - div.innerHTML = ""; - div.style.cssText = divReset + "width:1px;padding:1px;display:inline;zoom:1"; - support.inlineBlockNeedsLayout = ( div.offsetWidth === 3 ); - - // Check if elements with layout shrink-wrap their children - // (IE 6 does this) - div.style.display = "block"; - div.style.overflow = "visible"; - div.innerHTML = "
"; - div.firstChild.style.width = "5px"; - support.shrinkWrapBlocks = ( div.offsetWidth !== 3 ); - - container.style.zoom = 1; - } - - // Null elements to avoid leaks in IE - body.removeChild( container ); - container = div = tds = marginDiv = null; - }); - - // Null elements to avoid leaks in IE - fragment.removeChild( div ); - all = a = select = opt = input = fragment = div = null; - - return support; -})(); -var rbrace = /(?:\{[\s\S]*\}|\[[\s\S]*\])$/, - rmultiDash = /([A-Z])/g; - -jQuery.extend({ - cache: {}, - - deletedIds: [], - - // Remove at next major release (1.9/2.0) - uuid: 0, - - // Unique for each copy of jQuery on the page - // Non-digits removed to match rinlinejQuery - expando: "jQuery" + ( jQuery.fn.jquery + Math.random() ).replace( /\D/g, "" ), - - // The following elements throw uncatchable exceptions if you - // attempt to add expando properties to them. - noData: { - "embed": true, - // Ban all objects except for Flash (which handle expandos) - "object": "clsid:D27CDB6E-AE6D-11cf-96B8-444553540000", - "applet": true - }, - - hasData: function( elem ) { - elem = elem.nodeType ? jQuery.cache[ elem[jQuery.expando] ] : elem[ jQuery.expando ]; - return !!elem && !isEmptyDataObject( elem ); - }, - - data: function( elem, name, data, pvt /* Internal Use Only */ ) { - if ( !jQuery.acceptData( elem ) ) { - return; - } - - var thisCache, ret, - internalKey = jQuery.expando, - getByName = typeof name === "string", - - // We have to handle DOM nodes and JS objects differently because IE6-7 - // can't GC object references properly across the DOM-JS boundary - isNode = elem.nodeType, - - // Only DOM nodes need the global jQuery cache; JS object data is - // attached directly to the object so GC can occur automatically - cache = isNode ? jQuery.cache : elem, - - // Only defining an ID for JS objects if its cache already exists allows - // the code to shortcut on the same path as a DOM node with no cache - id = isNode ? elem[ internalKey ] : elem[ internalKey ] && internalKey; - - // Avoid doing any more work than we need to when trying to get data on an - // object that has no data at all - if ( (!id || !cache[id] || (!pvt && !cache[id].data)) && getByName && data === undefined ) { - return; - } - - if ( !id ) { - // Only DOM nodes need a new unique ID for each element since their data - // ends up in the global cache - if ( isNode ) { - elem[ internalKey ] = id = jQuery.deletedIds.pop() || jQuery.guid++; - } else { - id = internalKey; - } - } - - if ( !cache[ id ] ) { - cache[ id ] = {}; - - // Avoids exposing jQuery metadata on plain JS objects when the object - // is serialized using JSON.stringify - if ( !isNode ) { - cache[ id ].toJSON = jQuery.noop; - } - } - - // An object can be passed to jQuery.data instead of a key/value pair; this gets - // shallow copied over onto the existing cache - if ( typeof name === "object" || typeof name === "function" ) { - if ( pvt ) { - cache[ id ] = jQuery.extend( cache[ id ], name ); - } else { - cache[ id ].data = jQuery.extend( cache[ id ].data, name ); - } - } - - thisCache = cache[ id ]; - - // jQuery data() is stored in a separate object inside the object's internal data - // cache in order to avoid key collisions between internal data and user-defined - // data. 
- if ( !pvt ) { - if ( !thisCache.data ) { - thisCache.data = {}; - } - - thisCache = thisCache.data; - } - - if ( data !== undefined ) { - thisCache[ jQuery.camelCase( name ) ] = data; - } - - // Check for both converted-to-camel and non-converted data property names - // If a data property was specified - if ( getByName ) { - - // First Try to find as-is property data - ret = thisCache[ name ]; - - // Test for null|undefined property data - if ( ret == null ) { - - // Try to find the camelCased property - ret = thisCache[ jQuery.camelCase( name ) ]; - } - } else { - ret = thisCache; - } - - return ret; - }, - - removeData: function( elem, name, pvt /* Internal Use Only */ ) { - if ( !jQuery.acceptData( elem ) ) { - return; - } - - var thisCache, i, l, - - isNode = elem.nodeType, - - // See jQuery.data for more information - cache = isNode ? jQuery.cache : elem, - id = isNode ? elem[ jQuery.expando ] : jQuery.expando; - - // If there is already no cache entry for this object, there is no - // purpose in continuing - if ( !cache[ id ] ) { - return; - } - - if ( name ) { - - thisCache = pvt ? cache[ id ] : cache[ id ].data; - - if ( thisCache ) { - - // Support array or space separated string names for data keys - if ( !jQuery.isArray( name ) ) { - - // try the string as a key before any manipulation - if ( name in thisCache ) { - name = [ name ]; - } else { - - // split the camel cased version by spaces unless a key with the spaces exists - name = jQuery.camelCase( name ); - if ( name in thisCache ) { - name = [ name ]; - } else { - name = name.split(" "); - } - } - } - - for ( i = 0, l = name.length; i < l; i++ ) { - delete thisCache[ name[i] ]; - } - - // If there is no data left in the cache, we want to continue - // and let the cache object itself get destroyed - if ( !( pvt ? isEmptyDataObject : jQuery.isEmptyObject )( thisCache ) ) { - return; - } - } - } - - // See jQuery.data for more information - if ( !pvt ) { - delete cache[ id ].data; - - // Don't destroy the parent cache unless the internal data object - // had been the only thing left in it - if ( !isEmptyDataObject( cache[ id ] ) ) { - return; - } - } - - // Destroy the cache - if ( isNode ) { - jQuery.cleanData( [ elem ], true ); - - // Use delete when supported for expandos or `cache` is not a window per isWindow (#10080) - } else if ( jQuery.support.deleteExpando || cache != cache.window ) { - delete cache[ id ]; - - // When all else fails, null - } else { - cache[ id ] = null; - } - }, - - // For internal use only. 
- _data: function( elem, name, data ) { - return jQuery.data( elem, name, data, true ); - }, - - // A method for determining if a DOM node can handle the data expando - acceptData: function( elem ) { - var noData = elem.nodeName && jQuery.noData[ elem.nodeName.toLowerCase() ]; - - // nodes accept data unless otherwise specified; rejection can be conditional - return !noData || noData !== true && elem.getAttribute("classid") === noData; - } -}); - -jQuery.fn.extend({ - data: function( key, value ) { - var parts, part, attr, name, l, - elem = this[0], - i = 0, - data = null; - - // Gets all values - if ( key === undefined ) { - if ( this.length ) { - data = jQuery.data( elem ); - - if ( elem.nodeType === 1 && !jQuery._data( elem, "parsedAttrs" ) ) { - attr = elem.attributes; - for ( l = attr.length; i < l; i++ ) { - name = attr[i].name; - - if ( !name.indexOf( "data-" ) ) { - name = jQuery.camelCase( name.substring(5) ); - - dataAttr( elem, name, data[ name ] ); - } - } - jQuery._data( elem, "parsedAttrs", true ); - } - } - - return data; - } - - // Sets multiple values - if ( typeof key === "object" ) { - return this.each(function() { - jQuery.data( this, key ); - }); - } - - parts = key.split( ".", 2 ); - parts[1] = parts[1] ? "." + parts[1] : ""; - part = parts[1] + "!"; - - return jQuery.access( this, function( value ) { - - if ( value === undefined ) { - data = this.triggerHandler( "getData" + part, [ parts[0] ] ); - - // Try to fetch any internally stored data first - if ( data === undefined && elem ) { - data = jQuery.data( elem, key ); - data = dataAttr( elem, key, data ); - } - - return data === undefined && parts[1] ? - this.data( parts[0] ) : - data; - } - - parts[1] = value; - this.each(function() { - var self = jQuery( this ); - - self.triggerHandler( "setData" + part, parts ); - jQuery.data( this, key, value ); - self.triggerHandler( "changeData" + part, parts ); - }); - }, null, value, arguments.length > 1, null, false ); - }, - - removeData: function( key ) { - return this.each(function() { - jQuery.removeData( this, key ); - }); - } -}); - -function dataAttr( elem, key, data ) { - // If nothing was found internally, try to fetch any - // data from the HTML5 data-* attribute - if ( data === undefined && elem.nodeType === 1 ) { - - var name = "data-" + key.replace( rmultiDash, "-$1" ).toLowerCase(); - - data = elem.getAttribute( name ); - - if ( typeof data === "string" ) { - try { - data = data === "true" ? true : - data === "false" ? false : - data === "null" ? null : - // Only convert to a number if it doesn't change the string - +data + "" === data ? +data : - rbrace.test( data ) ? 
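- // This conversion chain gives HTML5 data-* strings their natural JS types on
- // first read; a sketch with illustrative markup:
- //
- //   <div id="box" data-count="3" data-open="true" data-cfg='{"a":1}'></div>
- //
- //   jQuery( "#box" ).data( "count" );  // 3      (numeric round-trip leaves it a number)
- //   jQuery( "#box" ).data( "open" );   // true   (boolean literal)
- //   jQuery( "#box" ).data( "cfg" );    // { a: 1 } (rbrace matches, so parseJSON runs)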
jQuery.parseJSON( data ) : - data; - } catch( e ) {} - - // Make sure we set the data so it isn't changed later - jQuery.data( elem, key, data ); - - } else { - data = undefined; - } - } - - return data; -} - -// checks a cache object for emptiness -function isEmptyDataObject( obj ) { - var name; - for ( name in obj ) { - - // if the public data object is empty, the private is still empty - if ( name === "data" && jQuery.isEmptyObject( obj[name] ) ) { - continue; - } - if ( name !== "toJSON" ) { - return false; - } - } - - return true; -} -jQuery.extend({ - queue: function( elem, type, data ) { - var queue; - - if ( elem ) { - type = ( type || "fx" ) + "queue"; - queue = jQuery._data( elem, type ); - - // Speed up dequeue by getting out quickly if this is just a lookup - if ( data ) { - if ( !queue || jQuery.isArray(data) ) { - queue = jQuery._data( elem, type, jQuery.makeArray(data) ); - } else { - queue.push( data ); - } - } - return queue || []; - } - }, - - dequeue: function( elem, type ) { - type = type || "fx"; - - var queue = jQuery.queue( elem, type ), - startLength = queue.length, - fn = queue.shift(), - hooks = jQuery._queueHooks( elem, type ), - next = function() { - jQuery.dequeue( elem, type ); - }; - - // If the fx queue is dequeued, always remove the progress sentinel - if ( fn === "inprogress" ) { - fn = queue.shift(); - startLength--; - } - - if ( fn ) { - - // Add a progress sentinel to prevent the fx queue from being - // automatically dequeued - if ( type === "fx" ) { - queue.unshift( "inprogress" ); - } - - // clear up the last queue stop function - delete hooks.stop; - fn.call( elem, next, hooks ); - } - - if ( !startLength && hooks ) { - hooks.empty.fire(); - } - }, - - // not intended for public consumption - generates a queueHooks object, or returns the current one - _queueHooks: function( elem, type ) { - var key = type + "queueHooks"; - return jQuery._data( elem, key ) || jQuery._data( elem, key, { - empty: jQuery.Callbacks("once memory").add(function() { - jQuery.removeData( elem, type + "queue", true ); - jQuery.removeData( elem, key, true ); - }) - }); - } -}); - -jQuery.fn.extend({ - queue: function( type, data ) { - var setter = 2; - - if ( typeof type !== "string" ) { - data = type; - type = "fx"; - setter--; - } - - if ( arguments.length < setter ) { - return jQuery.queue( this[0], type ); - } - - return data === undefined ? - this : - this.each(function() { - var queue = jQuery.queue( this, type, data ); - - // ensure a hooks for this queue - jQuery._queueHooks( this, type ); - - if ( type === "fx" && queue[0] !== "inprogress" ) { - jQuery.dequeue( this, type ); - } - }); - }, - dequeue: function( type ) { - return this.each(function() { - jQuery.dequeue( this, type ); - }); - }, - // Based off of the plugin by Clint Helfers, with permission. - // http://blindsignals.com/index.php/2009/07/jquery-delay/ - delay: function( time, type ) { - time = jQuery.fx ? 
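- // How the queue plumbing above is typically driven, via the public
- // .delay()/.queue() wrappers; selector and timing are illustrative:
- //
- //   jQuery( "#box" )
- //       .delay( 400 )  // queues a setTimeout-backed step on the "fx" queue
- //       .queue(function( next, hooks ) {
- //           console.log( "runs about 400ms later" );
- //           next();    // hand control to the next queued function
- //       });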
jQuery.fx.speeds[ time ] || time : time; - type = type || "fx"; - - return this.queue( type, function( next, hooks ) { - var timeout = setTimeout( next, time ); - hooks.stop = function() { - clearTimeout( timeout ); - }; - }); - }, - clearQueue: function( type ) { - return this.queue( type || "fx", [] ); - }, - // Get a promise resolved when queues of a certain type - // are emptied (fx is the type by default) - promise: function( type, obj ) { - var tmp, - count = 1, - defer = jQuery.Deferred(), - elements = this, - i = this.length, - resolve = function() { - if ( !( --count ) ) { - defer.resolveWith( elements, [ elements ] ); - } - }; - - if ( typeof type !== "string" ) { - obj = type; - type = undefined; - } - type = type || "fx"; - - while( i-- ) { - tmp = jQuery._data( elements[ i ], type + "queueHooks" ); - if ( tmp && tmp.empty ) { - count++; - tmp.empty.add( resolve ); - } - } - resolve(); - return defer.promise( obj ); - } -}); -var nodeHook, boolHook, fixSpecified, - rclass = /[\t\r\n]/g, - rreturn = /\r/g, - rtype = /^(?:button|input)$/i, - rfocusable = /^(?:button|input|object|select|textarea)$/i, - rclickable = /^a(?:rea|)$/i, - rboolean = /^(?:autofocus|autoplay|async|checked|controls|defer|disabled|hidden|loop|multiple|open|readonly|required|scoped|selected)$/i, - getSetAttribute = jQuery.support.getSetAttribute; - -jQuery.fn.extend({ - attr: function( name, value ) { - return jQuery.access( this, jQuery.attr, name, value, arguments.length > 1 ); - }, - - removeAttr: function( name ) { - return this.each(function() { - jQuery.removeAttr( this, name ); - }); - }, - - prop: function( name, value ) { - return jQuery.access( this, jQuery.prop, name, value, arguments.length > 1 ); - }, - - removeProp: function( name ) { - name = jQuery.propFix[ name ] || name; - return this.each(function() { - // try/catch handles cases where IE balks (such as removing a property on window) - try { - this[ name ] = undefined; - delete this[ name ]; - } catch( e ) {} - }); - }, - - addClass: function( value ) { - var classNames, i, l, elem, - setClass, c, cl; - - if ( jQuery.isFunction( value ) ) { - return this.each(function( j ) { - jQuery( this ).addClass( value.call(this, j, this.className) ); - }); - } - - if ( value && typeof value === "string" ) { - classNames = value.split( core_rspace ); - - for ( i = 0, l = this.length; i < l; i++ ) { - elem = this[ i ]; - - if ( elem.nodeType === 1 ) { - if ( !elem.className && classNames.length === 1 ) { - elem.className = value; - - } else { - setClass = " " + elem.className + " "; - - for ( c = 0, cl = classNames.length; c < cl; c++ ) { - if ( setClass.indexOf( " " + classNames[ c ] + " " ) < 0 ) { - setClass += classNames[ c ] + " "; - } - } - elem.className = jQuery.trim( setClass ); - } - } - } - } - - return this; - }, - - removeClass: function( value ) { - var removes, className, elem, c, cl, i, l; - - if ( jQuery.isFunction( value ) ) { - return this.each(function( j ) { - jQuery( this ).removeClass( value.call(this, j, this.className) ); - }); - } - if ( (value && typeof value === "string") || value === undefined ) { - removes = ( value || "" ).split( core_rspace ); - - for ( i = 0, l = this.length; i < l; i++ ) { - elem = this[ i ]; - if ( elem.nodeType === 1 && elem.className ) { - - className = (" " + elem.className + " ").replace( rclass, " " ); - - // loop over each item in the removal list - for ( c = 0, cl = removes.length; c < cl; c++ ) { - // Remove until there is nothing to remove, - while ( className.indexOf(" " + removes[ c ] + " 
") >= 0 ) { - className = className.replace( " " + removes[ c ] + " " , " " ); - } - } - elem.className = value ? jQuery.trim( className ) : ""; - } - } - } - - return this; - }, - - toggleClass: function( value, stateVal ) { - var type = typeof value, - isBool = typeof stateVal === "boolean"; - - if ( jQuery.isFunction( value ) ) { - return this.each(function( i ) { - jQuery( this ).toggleClass( value.call(this, i, this.className, stateVal), stateVal ); - }); - } - - return this.each(function() { - if ( type === "string" ) { - // toggle individual class names - var className, - i = 0, - self = jQuery( this ), - state = stateVal, - classNames = value.split( core_rspace ); - - while ( (className = classNames[ i++ ]) ) { - // check each className given, space separated list - state = isBool ? state : !self.hasClass( className ); - self[ state ? "addClass" : "removeClass" ]( className ); - } - - } else if ( type === "undefined" || type === "boolean" ) { - if ( this.className ) { - // store className if set - jQuery._data( this, "__className__", this.className ); - } - - // toggle whole className - this.className = this.className || value === false ? "" : jQuery._data( this, "__className__" ) || ""; - } - }); - }, - - hasClass: function( selector ) { - var className = " " + selector + " ", - i = 0, - l = this.length; - for ( ; i < l; i++ ) { - if ( this[i].nodeType === 1 && (" " + this[i].className + " ").replace(rclass, " ").indexOf( className ) >= 0 ) { - return true; - } - } - - return false; - }, - - val: function( value ) { - var hooks, ret, isFunction, - elem = this[0]; - - if ( !arguments.length ) { - if ( elem ) { - hooks = jQuery.valHooks[ elem.type ] || jQuery.valHooks[ elem.nodeName.toLowerCase() ]; - - if ( hooks && "get" in hooks && (ret = hooks.get( elem, "value" )) !== undefined ) { - return ret; - } - - ret = elem.value; - - return typeof ret === "string" ? - // handle most common string cases - ret.replace(rreturn, "") : - // handle cases where value is null/undef or number - ret == null ? "" : ret; - } - - return; - } - - isFunction = jQuery.isFunction( value ); - - return this.each(function( i ) { - var val, - self = jQuery(this); - - if ( this.nodeType !== 1 ) { - return; - } - - if ( isFunction ) { - val = value.call( this, i, self.val() ); - } else { - val = value; - } - - // Treat null/undefined as ""; convert numbers to string - if ( val == null ) { - val = ""; - } else if ( typeof val === "number" ) { - val += ""; - } else if ( jQuery.isArray( val ) ) { - val = jQuery.map(val, function ( value ) { - return value == null ? "" : value + ""; - }); - } - - hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; - - // If set returns undefined, fall back to normal setting - if ( !hooks || !("set" in hooks) || hooks.set( this, val, "value" ) === undefined ) { - this.value = val; - } - }); - } -}); - -jQuery.extend({ - valHooks: { - option: { - get: function( elem ) { - // attributes.value is undefined in Blackberry 4.7 but - // uses .value. See #6932 - var val = elem.attributes.value; - return !val || val.specified ? elem.value : elem.text; - } - }, - select: { - get: function( elem ) { - var value, option, - options = elem.options, - index = elem.selectedIndex, - one = elem.type === "select-one" || index < 0, - values = one ? null : [], - max = one ? index + 1 : options.length, - i = index < 0 ? - max : - one ? 
index : 0; - - // Loop through all the selected options - for ( ; i < max; i++ ) { - option = options[ i ]; - - // oldIE doesn't update selected after form reset (#2551) - if ( ( option.selected || i === index ) && - // Don't return options that are disabled or in a disabled optgroup - ( jQuery.support.optDisabled ? !option.disabled : option.getAttribute("disabled") === null ) && - ( !option.parentNode.disabled || !jQuery.nodeName( option.parentNode, "optgroup" ) ) ) { - - // Get the specific value for the option - value = jQuery( option ).val(); - - // We don't need an array for one selects - if ( one ) { - return value; - } - - // Multi-Selects return an array - values.push( value ); - } - } - - return values; - }, - - set: function( elem, value ) { - var values = jQuery.makeArray( value ); - - jQuery(elem).find("option").each(function() { - this.selected = jQuery.inArray( jQuery(this).val(), values ) >= 0; - }); - - if ( !values.length ) { - elem.selectedIndex = -1; - } - return values; - } - } - }, - - // Unused in 1.8, left in so attrFn-stabbers won't die; remove in 1.9 - attrFn: {}, - - attr: function( elem, name, value, pass ) { - var ret, hooks, notxml, - nType = elem.nodeType; - - // don't get/set attributes on text, comment and attribute nodes - if ( !elem || nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - if ( pass && jQuery.isFunction( jQuery.fn[ name ] ) ) { - return jQuery( elem )[ name ]( value ); - } - - // Fallback to prop when attributes are not supported - if ( typeof elem.getAttribute === "undefined" ) { - return jQuery.prop( elem, name, value ); - } - - notxml = nType !== 1 || !jQuery.isXMLDoc( elem ); - - // All attributes are lowercase - // Grab necessary hook if one is defined - if ( notxml ) { - name = name.toLowerCase(); - hooks = jQuery.attrHooks[ name ] || ( rboolean.test( name ) ? boolHook : nodeHook ); - } - - if ( value !== undefined ) { - - if ( value === null ) { - jQuery.removeAttr( elem, name ); - return; - - } else if ( hooks && "set" in hooks && notxml && (ret = hooks.set( elem, value, name )) !== undefined ) { - return ret; - - } else { - elem.setAttribute( name, value + "" ); - return value; - } - - } else if ( hooks && "get" in hooks && notxml && (ret = hooks.get( elem, name )) !== null ) { - return ret; - - } else { - - ret = elem.getAttribute( name ); - - // Non-existent attributes return null, we normalize to undefined - return ret === null ? - undefined : - ret; - } - }, - - removeAttr: function( elem, value ) { - var propName, attrNames, name, isBool, - i = 0; - - if ( value && elem.nodeType === 1 ) { - - attrNames = value.split( core_rspace ); - - for ( ; i < attrNames.length; i++ ) { - name = attrNames[ i ]; - - if ( name ) { - propName = jQuery.propFix[ name ] || name; - isBool = rboolean.test( name ); - - // See #9699 for explanation of this approach (setting first, then removal) - // Do not do this for boolean attributes (see #10870) - if ( !isBool ) { - jQuery.attr( elem, name, "" ); - } - elem.removeAttribute( getSetAttribute ? 
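- // The jQuery.attr() branch above routes a null value to removal; a sketch with
- // an illustrative element:
- //
- //   var $a = jQuery( "<a href='/x' title='hi'></a>" );
- //   $a.attr( "title" );        // "hi"
- //   $a.attr( "title", null );  // delegates to jQuery.removeAttr()
- //   $a.attr( "title" );        // undefined (missing attributes normalize to undefined)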
name : propName ); - - // Set corresponding property to false for boolean attributes - if ( isBool && propName in elem ) { - elem[ propName ] = false; - } - } - } - } - }, - - attrHooks: { - type: { - set: function( elem, value ) { - // We can't allow the type property to be changed (since it causes problems in IE) - if ( rtype.test( elem.nodeName ) && elem.parentNode ) { - jQuery.error( "type property can't be changed" ); - } else if ( !jQuery.support.radioValue && value === "radio" && jQuery.nodeName(elem, "input") ) { - // Setting the type on a radio button after the value resets the value in IE6-9 - // Reset value to it's default in case type is set after value - // This is for element creation - var val = elem.value; - elem.setAttribute( "type", value ); - if ( val ) { - elem.value = val; - } - return value; - } - } - }, - // Use the value property for back compat - // Use the nodeHook for button elements in IE6/7 (#1954) - value: { - get: function( elem, name ) { - if ( nodeHook && jQuery.nodeName( elem, "button" ) ) { - return nodeHook.get( elem, name ); - } - return name in elem ? - elem.value : - null; - }, - set: function( elem, value, name ) { - if ( nodeHook && jQuery.nodeName( elem, "button" ) ) { - return nodeHook.set( elem, value, name ); - } - // Does not return so that setAttribute is also used - elem.value = value; - } - } - }, - - propFix: { - tabindex: "tabIndex", - readonly: "readOnly", - "for": "htmlFor", - "class": "className", - maxlength: "maxLength", - cellspacing: "cellSpacing", - cellpadding: "cellPadding", - rowspan: "rowSpan", - colspan: "colSpan", - usemap: "useMap", - frameborder: "frameBorder", - contenteditable: "contentEditable" - }, - - prop: function( elem, name, value ) { - var ret, hooks, notxml, - nType = elem.nodeType; - - // don't get/set properties on text, comment and attribute nodes - if ( !elem || nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - notxml = nType !== 1 || !jQuery.isXMLDoc( elem ); - - if ( notxml ) { - // Fix name and attach hooks - name = jQuery.propFix[ name ] || name; - hooks = jQuery.propHooks[ name ]; - } - - if ( value !== undefined ) { - if ( hooks && "set" in hooks && (ret = hooks.set( elem, value, name )) !== undefined ) { - return ret; - - } else { - return ( elem[ name ] = value ); - } - - } else { - if ( hooks && "get" in hooks && (ret = hooks.get( elem, name )) !== null ) { - return ret; - - } else { - return elem[ name ]; - } - } - }, - - propHooks: { - tabIndex: { - get: function( elem ) { - // elem.tabIndex doesn't always return the correct value when it hasn't been explicitly set - // http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ - var attributeNode = elem.getAttributeNode("tabindex"); - - return attributeNode && attributeNode.specified ? - parseInt( attributeNode.value, 10 ) : - rfocusable.test( elem.nodeName ) || rclickable.test( elem.nodeName ) && elem.href ? - 0 : - undefined; - } - } - } -}); - -// Hook for boolean attributes -boolHook = { - get: function( elem, name ) { - // Align boolean attributes with corresponding properties - // Fall back to attribute presence where some booleans are not supported - var attrNode, - property = jQuery.prop( elem, name ); - return property === true || typeof property !== "boolean" && ( attrNode = elem.getAttributeNode(name) ) && attrNode.nodeValue !== false ? 
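- // Why this boolHook exists: boolean attributes and properties diverge. A sketch
- // with an illustrative checkbox:
- //
- //   var $cb = jQuery( "<input type='checkbox' checked='checked'/>" );
- //   $cb.prop( "checked" );        // true      (the live DOM property)
- //   $cb.attr( "checked" );        // "checked" (boolHook reports the attribute name)
- //   $cb.prop( "checked", false ); // unchecks the box without touching the markup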
- name.toLowerCase() : - undefined; - }, - set: function( elem, value, name ) { - var propName; - if ( value === false ) { - // Remove boolean attributes when set to false - jQuery.removeAttr( elem, name ); - } else { - // value is true since we know at this point it's type boolean and not false - // Set boolean attributes to the same name and set the DOM property - propName = jQuery.propFix[ name ] || name; - if ( propName in elem ) { - // Only set the IDL specifically if it already exists on the element - elem[ propName ] = true; - } - - elem.setAttribute( name, name.toLowerCase() ); - } - return name; - } -}; - -// IE6/7 do not support getting/setting some attributes with get/setAttribute -if ( !getSetAttribute ) { - - fixSpecified = { - name: true, - id: true, - coords: true - }; - - // Use this for any attribute in IE6/7 - // This fixes almost every IE6/7 issue - nodeHook = jQuery.valHooks.button = { - get: function( elem, name ) { - var ret; - ret = elem.getAttributeNode( name ); - return ret && ( fixSpecified[ name ] ? ret.value !== "" : ret.specified ) ? - ret.value : - undefined; - }, - set: function( elem, value, name ) { - // Set the existing or create a new attribute node - var ret = elem.getAttributeNode( name ); - if ( !ret ) { - ret = document.createAttribute( name ); - elem.setAttributeNode( ret ); - } - return ( ret.value = value + "" ); - } - }; - - // Set width and height to auto instead of 0 on empty string( Bug #8150 ) - // This is for removals - jQuery.each([ "width", "height" ], function( i, name ) { - jQuery.attrHooks[ name ] = jQuery.extend( jQuery.attrHooks[ name ], { - set: function( elem, value ) { - if ( value === "" ) { - elem.setAttribute( name, "auto" ); - return value; - } - } - }); - }); - - // Set contenteditable to false on removals(#10429) - // Setting to empty string throws an error as an invalid value - jQuery.attrHooks.contenteditable = { - get: nodeHook.get, - set: function( elem, value, name ) { - if ( value === "" ) { - value = "false"; - } - nodeHook.set( elem, value, name ); - } - }; -} - - -// Some attributes require a special call on IE -if ( !jQuery.support.hrefNormalized ) { - jQuery.each([ "href", "src", "width", "height" ], function( i, name ) { - jQuery.attrHooks[ name ] = jQuery.extend( jQuery.attrHooks[ name ], { - get: function( elem ) { - var ret = elem.getAttribute( name, 2 ); - return ret === null ? 
undefined : ret; - } - }); - }); -} - -if ( !jQuery.support.style ) { - jQuery.attrHooks.style = { - get: function( elem ) { - // Return undefined in the case of empty string - // Normalize to lowercase since IE uppercases css property names - return elem.style.cssText.toLowerCase() || undefined; - }, - set: function( elem, value ) { - return ( elem.style.cssText = value + "" ); - } - }; -} - -// Safari mis-reports the default selected property of an option -// Accessing the parent's selectedIndex property fixes it -if ( !jQuery.support.optSelected ) { - jQuery.propHooks.selected = jQuery.extend( jQuery.propHooks.selected, { - get: function( elem ) { - var parent = elem.parentNode; - - if ( parent ) { - parent.selectedIndex; - - // Make sure that it also works with optgroups, see #5701 - if ( parent.parentNode ) { - parent.parentNode.selectedIndex; - } - } - return null; - } - }); -} - -// IE6/7 call enctype encoding -if ( !jQuery.support.enctype ) { - jQuery.propFix.enctype = "encoding"; -} - -// Radios and checkboxes getter/setter -if ( !jQuery.support.checkOn ) { - jQuery.each([ "radio", "checkbox" ], function() { - jQuery.valHooks[ this ] = { - get: function( elem ) { - // Handle the case where in Webkit "" is returned instead of "on" if a value isn't specified - return elem.getAttribute("value") === null ? "on" : elem.value; - } - }; - }); -} -jQuery.each([ "radio", "checkbox" ], function() { - jQuery.valHooks[ this ] = jQuery.extend( jQuery.valHooks[ this ], { - set: function( elem, value ) { - if ( jQuery.isArray( value ) ) { - return ( elem.checked = jQuery.inArray( jQuery(elem).val(), value ) >= 0 ); - } - } - }); -}); -var rformElems = /^(?:textarea|input|select)$/i, - rtypenamespace = /^([^\.]*|)(?:\.(.+)|)$/, - rhoverHack = /(?:^|\s)hover(\.\S+|)\b/, - rkeyEvent = /^key/, - rmouseEvent = /^(?:mouse|contextmenu)|click/, - rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, - hoverHack = function( events ) { - return jQuery.event.special.hover ? events : events.replace( rhoverHack, "mouseenter$1 mouseleave$1" ); - }; - -/* - * Helper functions for managing events -- not part of the public interface. - * Props to Dean Edwards' addEvent library for many of the ideas. - */ -jQuery.event = { - - add: function( elem, types, handler, data, selector ) { - - var elemData, eventHandle, events, - t, tns, type, namespaces, handleObj, - handleObjIn, handlers, special; - - // Don't attach events to noData or text/comment nodes (allow plain objects tho) - if ( elem.nodeType === 3 || elem.nodeType === 8 || !types || !handler || !(elemData = jQuery._data( elem )) ) { - return; - } - - // Caller can pass in an object of custom data in lieu of the handler - if ( handler.handler ) { - handleObjIn = handler; - handler = handleObjIn.handler; - selector = handleObjIn.selector; - } - - // Make sure that the handler has a unique ID, used to find/remove it later - if ( !handler.guid ) { - handler.guid = jQuery.guid++; - } - - // Init the element's event structure and main handler, if this is the first - events = elemData.events; - if ( !events ) { - elemData.events = events = {}; - } - eventHandle = elemData.handle; - if ( !eventHandle ) { - elemData.handle = eventHandle = function( e ) { - // Discard the second event of a jQuery.event.trigger() and - // when an event is called after a page has unloaded - return typeof jQuery !== "undefined" && (!e || jQuery.event.triggered !== e.type) ? 
- jQuery.event.dispatch.apply( eventHandle.elem, arguments ) : - undefined; - }; - // Add elem as a property of the handle fn to prevent a memory leak with IE non-native events - eventHandle.elem = elem; - } - - // Handle multiple events separated by a space - // jQuery(...).bind("mouseover mouseout", fn); - types = jQuery.trim( hoverHack(types) ).split( " " ); - for ( t = 0; t < types.length; t++ ) { - - tns = rtypenamespace.exec( types[t] ) || []; - type = tns[1]; - namespaces = ( tns[2] || "" ).split( "." ).sort(); - - // If event changes its type, use the special event handlers for the changed type - special = jQuery.event.special[ type ] || {}; - - // If selector defined, determine special event api type, otherwise given type - type = ( selector ? special.delegateType : special.bindType ) || type; - - // Update special based on newly reset type - special = jQuery.event.special[ type ] || {}; - - // handleObj is passed to all event handlers - handleObj = jQuery.extend({ - type: type, - origType: tns[1], - data: data, - handler: handler, - guid: handler.guid, - selector: selector, - needsContext: selector && jQuery.expr.match.needsContext.test( selector ), - namespace: namespaces.join(".") - }, handleObjIn ); - - // Init the event handler queue if we're the first - handlers = events[ type ]; - if ( !handlers ) { - handlers = events[ type ] = []; - handlers.delegateCount = 0; - - // Only use addEventListener/attachEvent if the special events handler returns false - if ( !special.setup || special.setup.call( elem, data, namespaces, eventHandle ) === false ) { - // Bind the global event handler to the element - if ( elem.addEventListener ) { - elem.addEventListener( type, eventHandle, false ); - - } else if ( elem.attachEvent ) { - elem.attachEvent( "on" + type, eventHandle ); - } - } - } - - if ( special.add ) { - special.add.call( elem, handleObj ); - - if ( !handleObj.handler.guid ) { - handleObj.handler.guid = handler.guid; - } - } - - // Add to the element's handler list, delegates in front - if ( selector ) { - handlers.splice( handlers.delegateCount++, 0, handleObj ); - } else { - handlers.push( handleObj ); - } - - // Keep track of which events have ever been used, for event optimization - jQuery.event.global[ type ] = true; - } - - // Nullify elem to prevent memory leaks in IE - elem = null; - }, - - global: {}, - - // Detach an event or set of events from an element - remove: function( elem, types, handler, selector, mappedTypes ) { - - var t, tns, type, origType, namespaces, origCount, - j, events, special, eventType, handleObj, - elemData = jQuery.hasData( elem ) && jQuery._data( elem ); - - if ( !elemData || !(events = elemData.events) ) { - return; - } - - // Once for each type.namespace in types; type may be omitted - types = jQuery.trim( hoverHack( types || "" ) ).split(" "); - for ( t = 0; t < types.length; t++ ) { - tns = rtypenamespace.exec( types[t] ) || []; - type = origType = tns[1]; - namespaces = tns[2]; - - // Unbind all events (on this namespace, if provided) for the element - if ( !type ) { - for ( type in events ) { - jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); - } - continue; - } - - special = jQuery.event.special[ type ] || {}; - type = ( selector? special.delegateType : special.bindType ) || type; - eventType = events[ type ] || []; - origCount = eventType.length; - namespaces = namespaces ? 
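- // The namespace bookkeeping above enables targeted teardown; a sketch through
- // the public .on()/.off() API (selector and namespace are illustrative):
- //
- //   jQuery( "#menu" ).on( "click.myPlugin", function() { /* ... */ } );
- //   jQuery( "#menu" ).on( "keydown.myPlugin", function() { /* ... */ } );
- //   jQuery( "#menu" ).off( ".myPlugin" );  // removes both handlers and nothing else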
new RegExp("(^|\\.)" + namespaces.split(".").sort().join("\\.(?:.*\\.|)") + "(\\.|$)") : null; - - // Remove matching events - for ( j = 0; j < eventType.length; j++ ) { - handleObj = eventType[ j ]; - - if ( ( mappedTypes || origType === handleObj.origType ) && - ( !handler || handler.guid === handleObj.guid ) && - ( !namespaces || namespaces.test( handleObj.namespace ) ) && - ( !selector || selector === handleObj.selector || selector === "**" && handleObj.selector ) ) { - eventType.splice( j--, 1 ); - - if ( handleObj.selector ) { - eventType.delegateCount--; - } - if ( special.remove ) { - special.remove.call( elem, handleObj ); - } - } - } - - // Remove generic event handler if we removed something and no more handlers exist - // (avoids potential for endless recursion during removal of special event handlers) - if ( eventType.length === 0 && origCount !== eventType.length ) { - if ( !special.teardown || special.teardown.call( elem, namespaces, elemData.handle ) === false ) { - jQuery.removeEvent( elem, type, elemData.handle ); - } - - delete events[ type ]; - } - } - - // Remove the expando if it's no longer used - if ( jQuery.isEmptyObject( events ) ) { - delete elemData.handle; - - // removeData also checks for emptiness and clears the expando if empty - // so use it instead of delete - jQuery.removeData( elem, "events", true ); - } - }, - - // Events that are safe to short-circuit if no handlers are attached. - // Native DOM events should not be added, they may have inline handlers. - customEvent: { - "getData": true, - "setData": true, - "changeData": true - }, - - trigger: function( event, data, elem, onlyHandlers ) { - // Don't do events on text and comment nodes - if ( elem && (elem.nodeType === 3 || elem.nodeType === 8) ) { - return; - } - - // Event object or event type - var cache, exclusive, i, cur, old, ontype, special, handle, eventPath, bubbleType, - type = event.type || event, - namespaces = []; - - // focus/blur morphs to focusin/out; ensure we're not firing them right now - if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { - return; - } - - if ( type.indexOf( "!" ) >= 0 ) { - // Exclusive events trigger only for the exact event (no namespaces) - type = type.slice(0, -1); - exclusive = true; - } - - if ( type.indexOf( "." ) >= 0 ) { - // Namespaced trigger; create a regexp to match event type in handle() - namespaces = type.split("."); - type = namespaces.shift(); - namespaces.sort(); - } - - if ( (!elem || jQuery.event.customEvent[ type ]) && !jQuery.event.global[ type ] ) { - // No jQuery handlers for this event type, and it can't have inline handlers - return; - } - - // Caller can pass in an Event, Object, or just an event type string - event = typeof event === "object" ? - // jQuery.Event object - event[ jQuery.expando ] ? event : - // Object literal - new jQuery.Event( type, event ) : - // Just the event type (string) - new jQuery.Event( type ); - - event.type = type; - event.isTrigger = true; - event.exclusive = exclusive; - event.namespace = namespaces.join( "." ); - event.namespace_re = event.namespace? new RegExp("(^|\\.)" + namespaces.join("\\.(?:.*\\.|)") + "(\\.|$)") : null; - ontype = type.indexOf( ":" ) < 0 ? 
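- // A sketch of the trigger path above through the public .trigger() wrapper; the
- // event name and payload are illustrative:
- //
- //   jQuery( "#box" ).on( "refresh.widget", function( e, source ) {
- //       console.log( "refreshed by", source );
- //   });
- //   jQuery( "#box" ).trigger( "refresh.widget", [ "poller" ] );
- //   // extra trigger arguments arrive after the event object ("poller" above)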
"on" + type : ""; - - // Handle a global trigger - if ( !elem ) { - - // TODO: Stop taunting the data cache; remove global events and always attach to document - cache = jQuery.cache; - for ( i in cache ) { - if ( cache[ i ].events && cache[ i ].events[ type ] ) { - jQuery.event.trigger( event, data, cache[ i ].handle.elem, true ); - } - } - return; - } - - // Clean up the event in case it is being reused - event.result = undefined; - if ( !event.target ) { - event.target = elem; - } - - // Clone any incoming data and prepend the event, creating the handler arg list - data = data != null ? jQuery.makeArray( data ) : []; - data.unshift( event ); - - // Allow special events to draw outside the lines - special = jQuery.event.special[ type ] || {}; - if ( special.trigger && special.trigger.apply( elem, data ) === false ) { - return; - } - - // Determine event propagation path in advance, per W3C events spec (#9951) - // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) - eventPath = [[ elem, special.bindType || type ]]; - if ( !onlyHandlers && !special.noBubble && !jQuery.isWindow( elem ) ) { - - bubbleType = special.delegateType || type; - cur = rfocusMorph.test( bubbleType + type ) ? elem : elem.parentNode; - for ( old = elem; cur; cur = cur.parentNode ) { - eventPath.push([ cur, bubbleType ]); - old = cur; - } - - // Only add window if we got to document (e.g., not plain obj or detached DOM) - if ( old === (elem.ownerDocument || document) ) { - eventPath.push([ old.defaultView || old.parentWindow || window, bubbleType ]); - } - } - - // Fire handlers on the event path - for ( i = 0; i < eventPath.length && !event.isPropagationStopped(); i++ ) { - - cur = eventPath[i][0]; - event.type = eventPath[i][1]; - - handle = ( jQuery._data( cur, "events" ) || {} )[ event.type ] && jQuery._data( cur, "handle" ); - if ( handle ) { - handle.apply( cur, data ); - } - // Note that this is a bare JS function and not a jQuery handler - handle = ontype && cur[ ontype ]; - if ( handle && jQuery.acceptData( cur ) && handle.apply && handle.apply( cur, data ) === false ) { - event.preventDefault(); - } - } - event.type = type; - - // If nobody prevented the default action, do it now - if ( !onlyHandlers && !event.isDefaultPrevented() ) { - - if ( (!special._default || special._default.apply( elem.ownerDocument, data ) === false) && - !(type === "click" && jQuery.nodeName( elem, "a" )) && jQuery.acceptData( elem ) ) { - - // Call a native DOM method on the target with the same name name as the event. - // Can't use an .isFunction() check here because IE6/7 fails that test. 
- // Don't do default actions on window, that's where global variables be (#6170) - // IE<9 dies on focus/blur to hidden element (#1486) - if ( ontype && elem[ type ] && ((type !== "focus" && type !== "blur") || event.target.offsetWidth !== 0) && !jQuery.isWindow( elem ) ) { - - // Don't re-trigger an onFOO event when we call its FOO() method - old = elem[ ontype ]; - - if ( old ) { - elem[ ontype ] = null; - } - - // Prevent re-triggering of the same event, since we already bubbled it above - jQuery.event.triggered = type; - elem[ type ](); - jQuery.event.triggered = undefined; - - if ( old ) { - elem[ ontype ] = old; - } - } - } - } - - return event.result; - }, - - dispatch: function( event ) { - - // Make a writable jQuery.Event from the native event object - event = jQuery.event.fix( event || window.event ); - - var i, j, cur, ret, selMatch, matched, matches, handleObj, sel, related, - handlers = ( (jQuery._data( this, "events" ) || {} )[ event.type ] || []), - delegateCount = handlers.delegateCount, - args = core_slice.call( arguments ), - run_all = !event.exclusive && !event.namespace, - special = jQuery.event.special[ event.type ] || {}, - handlerQueue = []; - - // Use the fix-ed jQuery.Event rather than the (read-only) native event - args[0] = event; - event.delegateTarget = this; - - // Call the preDispatch hook for the mapped type, and let it bail if desired - if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { - return; - } - - // Determine handlers that should run if there are delegated events - // Avoid non-left-click bubbling in Firefox (#3861) - if ( delegateCount && !(event.button && event.type === "click") ) { - - for ( cur = event.target; cur != this; cur = cur.parentNode || this ) { - - // Don't process clicks (ONLY) on disabled elements (#6911, #8165, #11382, #11764) - if ( cur.disabled !== true || event.type !== "click" ) { - selMatch = {}; - matches = []; - for ( i = 0; i < delegateCount; i++ ) { - handleObj = handlers[ i ]; - sel = handleObj.selector; - - if ( selMatch[ sel ] === undefined ) { - selMatch[ sel ] = handleObj.needsContext ? - jQuery( sel, this ).index( cur ) >= 0 : - jQuery.find( sel, this, null, [ cur ] ).length; - } - if ( selMatch[ sel ] ) { - matches.push( handleObj ); - } - } - if ( matches.length ) { - handlerQueue.push({ elem: cur, matches: matches }); - } - } - } - } - - // Add the remaining (directly-bound) handlers - if ( handlers.length > delegateCount ) { - handlerQueue.push({ elem: this, matches: handlers.slice( delegateCount ) }); - } - - // Run delegates first; they may want to stop propagation beneath us - for ( i = 0; i < handlerQueue.length && !event.isPropagationStopped(); i++ ) { - matched = handlerQueue[ i ]; - event.currentTarget = matched.elem; - - for ( j = 0; j < matched.matches.length && !event.isImmediatePropagationStopped(); j++ ) { - handleObj = matched.matches[ j ]; - - // Triggered event must either 1) be non-exclusive and have no namespace, or - // 2) have namespace(s) a subset or equal to those in the bound event (both can have no namespace). 
- if ( run_all || (!event.namespace && !handleObj.namespace) || event.namespace_re && event.namespace_re.test( handleObj.namespace ) ) { - - event.data = handleObj.data; - event.handleObj = handleObj; - - ret = ( (jQuery.event.special[ handleObj.origType ] || {}).handle || handleObj.handler ) - .apply( matched.elem, args ); - - if ( ret !== undefined ) { - event.result = ret; - if ( ret === false ) { - event.preventDefault(); - event.stopPropagation(); - } - } - } - } - } - - // Call the postDispatch hook for the mapped type - if ( special.postDispatch ) { - special.postDispatch.call( this, event ); - } - - return event.result; - }, - - // Includes some event props shared by KeyEvent and MouseEvent - // *** attrChange attrName relatedNode srcElement are not normalized, non-W3C, deprecated, will be removed in 1.8 *** - props: "attrChange attrName relatedNode srcElement altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "), - - fixHooks: {}, - - keyHooks: { - props: "char charCode key keyCode".split(" "), - filter: function( event, original ) { - - // Add which for key events - if ( event.which == null ) { - event.which = original.charCode != null ? original.charCode : original.keyCode; - } - - return event; - } - }, - - mouseHooks: { - props: "button buttons clientX clientY fromElement offsetX offsetY pageX pageY screenX screenY toElement".split(" "), - filter: function( event, original ) { - var eventDoc, doc, body, - button = original.button, - fromElement = original.fromElement; - - // Calculate pageX/Y if missing and clientX/Y available - if ( event.pageX == null && original.clientX != null ) { - eventDoc = event.target.ownerDocument || document; - doc = eventDoc.documentElement; - body = eventDoc.body; - - event.pageX = original.clientX + ( doc && doc.scrollLeft || body && body.scrollLeft || 0 ) - ( doc && doc.clientLeft || body && body.clientLeft || 0 ); - event.pageY = original.clientY + ( doc && doc.scrollTop || body && body.scrollTop || 0 ) - ( doc && doc.clientTop || body && body.clientTop || 0 ); - } - - // Add relatedTarget, if necessary - if ( !event.relatedTarget && fromElement ) { - event.relatedTarget = fromElement === event.target ? original.toElement : fromElement; - } - - // Add which for click: 1 === left; 2 === middle; 3 === right - // Note: button is not normalized, so don't use it - if ( !event.which && button !== undefined ) { - event.which = ( button & 1 ? 1 : ( button & 2 ? 3 : ( button & 4 ? 2 : 0 ) ) ); - } - - return event; - } - }, - - fix: function( event ) { - if ( event[ jQuery.expando ] ) { - return event; - } - - // Create a writable copy of the event object and normalize some properties - var i, prop, - originalEvent = event, - fixHook = jQuery.event.fixHooks[ event.type ] || {}, - copy = fixHook.props ? this.props.concat( fixHook.props ) : this.props; - - event = jQuery.Event( originalEvent ); - - for ( i = copy.length; i; ) { - prop = copy[ --i ]; - event[ prop ] = originalEvent[ prop ]; - } - - // Fix target property, if necessary (#1925, IE 6/7/8 & Safari2) - if ( !event.target ) { - event.target = originalEvent.srcElement || document; - } - - // Target should not be a text node (#504, Safari) - if ( event.target.nodeType === 3 ) { - event.target = event.target.parentNode; - } - - // For mouse/key events, metaKey==false if it's undefined (#3368, #11328; IE6/7/8) - event.metaKey = !!event.metaKey; - - return fixHook.filter? 
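- // The props/keyHooks/mouseHooks tables around this point let handlers rely on
- // uniform event fields; a sketch:
- //
- //   jQuery( document ).on( "keydown", function( e ) {
- //       if ( e.which === 27 ) { console.log( "Escape" ); }  // which filled from keyCode/charCode
- //   });
- //   jQuery( document ).on( "mousedown", function( e ) {
- //       console.log( e.pageX, e.pageY, e.which );  // pageX/Y derived from clientX/Y plus scroll
- //   });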
fixHook.filter( event, originalEvent ) : event; - }, - - special: { - load: { - // Prevent triggered image.load events from bubbling to window.load - noBubble: true - }, - - focus: { - delegateType: "focusin" - }, - blur: { - delegateType: "focusout" - }, - - beforeunload: { - setup: function( data, namespaces, eventHandle ) { - // We only want to do this special case on windows - if ( jQuery.isWindow( this ) ) { - this.onbeforeunload = eventHandle; - } - }, - - teardown: function( namespaces, eventHandle ) { - if ( this.onbeforeunload === eventHandle ) { - this.onbeforeunload = null; - } - } - } - }, - - simulate: function( type, elem, event, bubble ) { - // Piggyback on a donor event to simulate a different one. - // Fake originalEvent to avoid donor's stopPropagation, but if the - // simulated event prevents default then we do the same on the donor. - var e = jQuery.extend( - new jQuery.Event(), - event, - { type: type, - isSimulated: true, - originalEvent: {} - } - ); - if ( bubble ) { - jQuery.event.trigger( e, null, elem ); - } else { - jQuery.event.dispatch.call( elem, e ); - } - if ( e.isDefaultPrevented() ) { - event.preventDefault(); - } - } -}; - -// Some plugins are using, but it's undocumented/deprecated and will be removed. -// The 1.7 special event interface should provide all the hooks needed now. -jQuery.event.handle = jQuery.event.dispatch; - -jQuery.removeEvent = document.removeEventListener ? - function( elem, type, handle ) { - if ( elem.removeEventListener ) { - elem.removeEventListener( type, handle, false ); - } - } : - function( elem, type, handle ) { - var name = "on" + type; - - if ( elem.detachEvent ) { - - // #8545, #7054, preventing memory leaks for custom events in IE6-8 - // detachEvent needed property on element, by name of that event, to properly expose it to GC - if ( typeof elem[ name ] === "undefined" ) { - elem[ name ] = null; - } - - elem.detachEvent( name, handle ); - } - }; - -jQuery.Event = function( src, props ) { - // Allow instantiation without the 'new' keyword - if ( !(this instanceof jQuery.Event) ) { - return new jQuery.Event( src, props ); - } - - // Event object - if ( src && src.type ) { - this.originalEvent = src; - this.type = src.type; - - // Events bubbling up the document may have been marked as prevented - // by a handler lower down the tree; reflect the correct value. - this.isDefaultPrevented = ( src.defaultPrevented || src.returnValue === false || - src.getPreventDefault && src.getPreventDefault() ) ? 
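- // jQuery.Event can be constructed directly (the guard above makes `new` optional)
- // and handed to .trigger(); the property bag is illustrative:
- //
- //   var e = jQuery.Event( "keydown", { which: 13 } );
- //   jQuery( "#field" ).trigger( e );
- //   if ( e.isDefaultPrevented() ) { /* some handler called e.preventDefault() */ }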
returnTrue : returnFalse; - - // Event type - } else { - this.type = src; - } - - // Put explicitly provided properties onto the event object - if ( props ) { - jQuery.extend( this, props ); - } - - // Create a timestamp if incoming event doesn't have one - this.timeStamp = src && src.timeStamp || jQuery.now(); - - // Mark it as fixed - this[ jQuery.expando ] = true; -}; - -function returnFalse() { - return false; -} -function returnTrue() { - return true; -} - -// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding -// http://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html -jQuery.Event.prototype = { - preventDefault: function() { - this.isDefaultPrevented = returnTrue; - - var e = this.originalEvent; - if ( !e ) { - return; - } - - // if preventDefault exists run it on the original event - if ( e.preventDefault ) { - e.preventDefault(); - - // otherwise set the returnValue property of the original event to false (IE) - } else { - e.returnValue = false; - } - }, - stopPropagation: function() { - this.isPropagationStopped = returnTrue; - - var e = this.originalEvent; - if ( !e ) { - return; - } - // if stopPropagation exists run it on the original event - if ( e.stopPropagation ) { - e.stopPropagation(); - } - // otherwise set the cancelBubble property of the original event to true (IE) - e.cancelBubble = true; - }, - stopImmediatePropagation: function() { - this.isImmediatePropagationStopped = returnTrue; - this.stopPropagation(); - }, - isDefaultPrevented: returnFalse, - isPropagationStopped: returnFalse, - isImmediatePropagationStopped: returnFalse -}; - -// Create mouseenter/leave events using mouseover/out and event-time checks -jQuery.each({ - mouseenter: "mouseover", - mouseleave: "mouseout" -}, function( orig, fix ) { - jQuery.event.special[ orig ] = { - delegateType: fix, - bindType: fix, - - handle: function( event ) { - var ret, - target = this, - related = event.relatedTarget, - handleObj = event.handleObj, - selector = handleObj.selector; - - // For mousenter/leave call the handler if related is outside the target. - // NB: No relatedTarget if the mouse left/entered the browser window - if ( !related || (related !== target && !jQuery.contains( target, related )) ) { - event.type = handleObj.origType; - ret = handleObj.handler.apply( this, arguments ); - event.type = fix; - } - return ret; - } - }; -}); - -// IE submit delegation -if ( !jQuery.support.submitBubbles ) { - - jQuery.event.special.submit = { - setup: function() { - // Only need this for delegated form submit events - if ( jQuery.nodeName( this, "form" ) ) { - return false; - } - - // Lazy-add a submit handler when a descendant form may potentially be submitted - jQuery.event.add( this, "click._submit keypress._submit", function( e ) { - // Node name check avoids a VML-related crash in IE (#9807) - var elem = e.target, - form = jQuery.nodeName( elem, "input" ) || jQuery.nodeName( elem, "button" ) ? 
elem.form : undefined; - if ( form && !jQuery._data( form, "_submit_attached" ) ) { - jQuery.event.add( form, "submit._submit", function( event ) { - event._submit_bubble = true; - }); - jQuery._data( form, "_submit_attached", true ); - } - }); - // return undefined since we don't need an event listener - }, - - postDispatch: function( event ) { - // If form was submitted by the user, bubble the event up the tree - if ( event._submit_bubble ) { - delete event._submit_bubble; - if ( this.parentNode && !event.isTrigger ) { - jQuery.event.simulate( "submit", this.parentNode, event, true ); - } - } - }, - - teardown: function() { - // Only need this for delegated form submit events - if ( jQuery.nodeName( this, "form" ) ) { - return false; - } - - // Remove delegated handlers; cleanData eventually reaps submit handlers attached above - jQuery.event.remove( this, "._submit" ); - } - }; -} - -// IE change delegation and checkbox/radio fix -if ( !jQuery.support.changeBubbles ) { - - jQuery.event.special.change = { - - setup: function() { - - if ( rformElems.test( this.nodeName ) ) { - // IE doesn't fire change on a check/radio until blur; trigger it on click - // after a propertychange. Eat the blur-change in special.change.handle. - // This still fires onchange a second time for check/radio after blur. - if ( this.type === "checkbox" || this.type === "radio" ) { - jQuery.event.add( this, "propertychange._change", function( event ) { - if ( event.originalEvent.propertyName === "checked" ) { - this._just_changed = true; - } - }); - jQuery.event.add( this, "click._change", function( event ) { - if ( this._just_changed && !event.isTrigger ) { - this._just_changed = false; - } - // Allow triggered, simulated change events (#11500) - jQuery.event.simulate( "change", this, event, true ); - }); - } - return false; - } - // Delegated event; lazy-add a change handler on descendant inputs - jQuery.event.add( this, "beforeactivate._change", function( e ) { - var elem = e.target; - - if ( rformElems.test( elem.nodeName ) && !jQuery._data( elem, "_change_attached" ) ) { - jQuery.event.add( elem, "change._change", function( event ) { - if ( this.parentNode && !event.isSimulated && !event.isTrigger ) { - jQuery.event.simulate( "change", this.parentNode, event, true ); - } - }); - jQuery._data( elem, "_change_attached", true ); - } - }); - }, - - handle: function( event ) { - var elem = event.target; - - // Swallow native change events from checkbox/radio, we already triggered them above - if ( this !== elem || event.isSimulated || event.isTrigger || (elem.type !== "radio" && elem.type !== "checkbox") ) { - return event.handleObj.handler.apply( this, arguments ); - } - }, - - teardown: function() { - jQuery.event.remove( this, "._change" ); - - return !rformElems.test( this.nodeName ); - } - }; -} - -// Create "bubbling" focus and blur events -if ( !jQuery.support.focusinBubbles ) { - jQuery.each({ focus: "focusin", blur: "focusout" }, function( orig, fix ) { - - // Attach a single capturing handler while someone wants focusin/focusout - var attaches = 0, - handler = function( event ) { - jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ), true ); - }; - - jQuery.event.special[ fix ] = { - setup: function() { - if ( attaches++ === 0 ) { - document.addEventListener( orig, handler, true ); - } - }, - teardown: function() { - if ( --attaches === 0 ) { - document.removeEventListener( orig, handler, true ); - } - } - }; - }); -} - -jQuery.fn.extend({ - - on: function( types, selector, data, fn, 
/*INTERNAL*/ one ) { - var origFn, type; - - // Types can be a map of types/handlers - if ( typeof types === "object" ) { - // ( types-Object, selector, data ) - if ( typeof selector !== "string" ) { // && selector != null - // ( types-Object, data ) - data = data || selector; - selector = undefined; - } - for ( type in types ) { - this.on( type, selector, data, types[ type ], one ); - } - return this; - } - - if ( data == null && fn == null ) { - // ( types, fn ) - fn = selector; - data = selector = undefined; - } else if ( fn == null ) { - if ( typeof selector === "string" ) { - // ( types, selector, fn ) - fn = data; - data = undefined; - } else { - // ( types, data, fn ) - fn = data; - data = selector; - selector = undefined; - } - } - if ( fn === false ) { - fn = returnFalse; - } else if ( !fn ) { - return this; - } - - if ( one === 1 ) { - origFn = fn; - fn = function( event ) { - // Can use an empty set, since event contains the info - jQuery().off( event ); - return origFn.apply( this, arguments ); - }; - // Use same guid so caller can remove using origFn - fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); - } - return this.each( function() { - jQuery.event.add( this, types, fn, data, selector ); - }); - }, - one: function( types, selector, data, fn ) { - return this.on( types, selector, data, fn, 1 ); - }, - off: function( types, selector, fn ) { - var handleObj, type; - if ( types && types.preventDefault && types.handleObj ) { - // ( event ) dispatched jQuery.Event - handleObj = types.handleObj; - jQuery( types.delegateTarget ).off( - handleObj.namespace ? handleObj.origType + "." + handleObj.namespace : handleObj.origType, - handleObj.selector, - handleObj.handler - ); - return this; - } - if ( typeof types === "object" ) { - // ( types-object [, selector] ) - for ( type in types ) { - this.off( type, selector, types[ type ] ); - } - return this; - } - if ( selector === false || typeof selector === "function" ) { - // ( types [, fn] ) - fn = selector; - selector = undefined; - } - if ( fn === false ) { - fn = returnFalse; - } - return this.each(function() { - jQuery.event.remove( this, types, fn, selector ); - }); - }, - - bind: function( types, data, fn ) { - return this.on( types, null, data, fn ); - }, - unbind: function( types, fn ) { - return this.off( types, null, fn ); - }, - - live: function( types, data, fn ) { - jQuery( this.context ).on( types, this.selector, data, fn ); - return this; - }, - die: function( types, fn ) { - jQuery( this.context ).off( types, this.selector || "**", fn ); - return this; - }, - - delegate: function( selector, types, data, fn ) { - return this.on( types, selector, data, fn ); - }, - undelegate: function( selector, types, fn ) { - // ( namespace ) or ( selector, types [, fn] ) - return arguments.length === 1 ? 
this.off( selector, "**" ) : this.off( types, selector || "**", fn ); - }, - - trigger: function( type, data ) { - return this.each(function() { - jQuery.event.trigger( type, data, this ); - }); - }, - triggerHandler: function( type, data ) { - if ( this[0] ) { - return jQuery.event.trigger( type, data, this[0], true ); - } - }, - - toggle: function( fn ) { - // Save reference to arguments for access in closure - var args = arguments, - guid = fn.guid || jQuery.guid++, - i = 0, - toggler = function( event ) { - // Figure out which function to execute - var lastToggle = ( jQuery._data( this, "lastToggle" + fn.guid ) || 0 ) % i; - jQuery._data( this, "lastToggle" + fn.guid, lastToggle + 1 ); - - // Make sure that clicks stop - event.preventDefault(); - - // and execute the function - return args[ lastToggle ].apply( this, arguments ) || false; - }; - - // link all the functions, so any of them can unbind this click handler - toggler.guid = guid; - while ( i < args.length ) { - args[ i++ ].guid = guid; - } - - return this.click( toggler ); - }, - - hover: function( fnOver, fnOut ) { - return this.mouseenter( fnOver ).mouseleave( fnOut || fnOver ); - } -}); - -jQuery.each( ("blur focus focusin focusout load resize scroll unload click dblclick " + - "mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave " + - "change select submit keydown keypress keyup error contextmenu").split(" "), function( i, name ) { - - // Handle event binding - jQuery.fn[ name ] = function( data, fn ) { - if ( fn == null ) { - fn = data; - data = null; - } - - return arguments.length > 0 ? - this.on( name, null, data, fn ) : - this.trigger( name ); - }; - - if ( rkeyEvent.test( name ) ) { - jQuery.event.fixHooks[ name ] = jQuery.event.keyHooks; - } - - if ( rmouseEvent.test( name ) ) { - jQuery.event.fixHooks[ name ] = jQuery.event.mouseHooks; - } -}); -/*! 
- * Sizzle CSS Selector Engine - * Copyright 2012 jQuery Foundation and other contributors - * Released under the MIT license - * http://sizzlejs.com/ - */ -(function( window, undefined ) { - -var cachedruns, - assertGetIdNotName, - Expr, - getText, - isXML, - contains, - compile, - sortOrder, - hasDuplicate, - outermostContext, - - baseHasDuplicate = true, - strundefined = "undefined", - - expando = ( "sizcache" + Math.random() ).replace( ".", "" ), - - Token = String, - document = window.document, - docElem = document.documentElement, - dirruns = 0, - done = 0, - pop = [].pop, - push = [].push, - slice = [].slice, - // Use a stripped-down indexOf if a native one is unavailable - indexOf = [].indexOf || function( elem ) { - var i = 0, - len = this.length; - for ( ; i < len; i++ ) { - if ( this[i] === elem ) { - return i; - } - } - return -1; - }, - - // Augment a function for special use by Sizzle - markFunction = function( fn, value ) { - fn[ expando ] = value == null || value; - return fn; - }, - - createCache = function() { - var cache = {}, - keys = []; - - return markFunction(function( key, value ) { - // Only keep the most recent entries - if ( keys.push( key ) > Expr.cacheLength ) { - delete cache[ keys.shift() ]; - } - - // Retrieve with (key + " ") to avoid collision with native Object.prototype properties (see Issue #157) - return (cache[ key + " " ] = value); - }, cache ); - }, - - classCache = createCache(), - tokenCache = createCache(), - compilerCache = createCache(), - - // Regex - - // Whitespace characters http://www.w3.org/TR/css3-selectors/#whitespace - whitespace = "[\\x20\\t\\r\\n\\f]", - // http://www.w3.org/TR/css3-syntax/#characters - characterEncoding = "(?:\\\\.|[-\\w]|[^\\x00-\\xa0])+", - - // Loosely modeled on CSS identifier characters - // An unquoted value should be a CSS identifier (http://www.w3.org/TR/css3-selectors/#attribute-selectors) - // Proper syntax: http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier - identifier = characterEncoding.replace( "w", "w#" ), - - // Acceptable operators http://www.w3.org/TR/selectors/#attribute-selectors - operators = "([*^$|!~]?=)", - attributes = "\\[" + whitespace + "*(" + characterEncoding + ")" + whitespace + - "*(?:" + operators + whitespace + "*(?:(['\"])((?:\\\\.|[^\\\\])*?)\\3|(" + identifier + ")|)|)" + whitespace + "*\\]", - - // Prefer arguments not in parens/brackets, - // then attribute selectors and non-pseudos (denoted by :), - // then anything else - // These preferences are here to reduce the number of selectors - // needing tokenize in the PSEUDO preFilter - pseudos = ":(" + characterEncoding + ")(?:\\((?:(['\"])((?:\\\\.|[^\\\\])*?)\\2|([^()[\\]]*|(?:(?:" + attributes + ")|[^:]|\\\\.)*|.*))\\)|)", - - // For matchExpr.POS and matchExpr.needsContext - pos = ":(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + whitespace + - "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", - - // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter - rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), - - rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), - rcombinators = new RegExp( "^" + whitespace + "*([\\x20\\t\\r\\n\\f>+~])" + whitespace + "*" ), - rpseudo = new RegExp( pseudos ), - - // Easily-parseable/retrievable ID or TAG or CLASS selectors - rquickExpr = /^(?:#([\w\-]+)|(\w+)|\.([\w\-]+))$/, - - rnot = /^:not/, - rsibling = /[\x20\t\r\n\f]*[+~]/, - rendsWithNot = /:not\($/, - - rheader = 
/h\d/i, - rinputs = /input|select|textarea|button/i, - - rbackslash = /\\(?!\\)/g, - - matchExpr = { - "ID": new RegExp( "^#(" + characterEncoding + ")" ), - "CLASS": new RegExp( "^\\.(" + characterEncoding + ")" ), - "NAME": new RegExp( "^\\[name=['\"]?(" + characterEncoding + ")['\"]?\\]" ), - "TAG": new RegExp( "^(" + characterEncoding.replace( "w", "w*" ) + ")" ), - "ATTR": new RegExp( "^" + attributes ), - "PSEUDO": new RegExp( "^" + pseudos ), - "POS": new RegExp( pos, "i" ), - "CHILD": new RegExp( "^:(only|nth|first|last)-child(?:\\(" + whitespace + - "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + - "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), - // For use in libraries implementing .is() - "needsContext": new RegExp( "^" + whitespace + "*[>+~]|" + pos, "i" ) - }, - - // Support - - // Used for testing something on an element - assert = function( fn ) { - var div = document.createElement("div"); - - try { - return fn( div ); - } catch (e) { - return false; - } finally { - // release memory in IE - div = null; - } - }, - - // Check if getElementsByTagName("*") returns only elements - assertTagNameNoComments = assert(function( div ) { - div.appendChild( document.createComment("") ); - return !div.getElementsByTagName("*").length; - }), - - // Check if getAttribute returns normalized href attributes - assertHrefNotNormalized = assert(function( div ) { - div.innerHTML = "<a href='#'></a>"; - return div.firstChild && typeof div.firstChild.getAttribute !== strundefined && - div.firstChild.getAttribute("href") === "#"; - }), - - // Check if attributes should be retrieved by attribute nodes - assertAttributes = assert(function( div ) { - div.innerHTML = "<select></select>"; - var type = typeof div.lastChild.getAttribute("multiple"); - // IE8 returns a string for some attributes even when not present - return type !== "boolean" && type !== "string"; - }), - - // Check if getElementsByClassName can be trusted - assertUsableClassName = assert(function( div ) { - // Opera can't find a second classname (in 9.6) - div.innerHTML = "<div class='hidden e'></div><div class='hidden'></div>"; - if ( !div.getElementsByClassName || !div.getElementsByClassName("e").length ) { - return false; - } - - // Safari 3.2 caches class attributes and doesn't catch changes - div.lastChild.className = "e"; - return div.getElementsByClassName("e").length === 2; - }), - - // Check if getElementById returns elements by name - // Check if getElementsByName privileges form controls or returns elements by ID - assertUsableName = assert(function( div ) { - // Inject content - div.id = expando + 0; - div.innerHTML = "<a name='" + expando + "'></a><div name='" + expando + "'></div>
"; - docElem.insertBefore( div, docElem.firstChild ); - - // Test - var pass = document.getElementsByName && - // buggy browsers will return fewer than the correct 2 - document.getElementsByName( expando ).length === 2 + - // buggy browsers will return more than the correct 0 - document.getElementsByName( expando + 0 ).length; - assertGetIdNotName = !document.getElementById( expando ); - - // Cleanup - docElem.removeChild( div ); - - return pass; - }); - -// If slice is not available, provide a backup -try { - slice.call( docElem.childNodes, 0 )[0].nodeType; -} catch ( e ) { - slice = function( i ) { - var elem, - results = []; - for ( ; (elem = this[i]); i++ ) { - results.push( elem ); - } - return results; - }; -} - -function Sizzle( selector, context, results, seed ) { - results = results || []; - context = context || document; - var match, elem, xml, m, - nodeType = context.nodeType; - - if ( !selector || typeof selector !== "string" ) { - return results; - } - - if ( nodeType !== 1 && nodeType !== 9 ) { - return []; - } - - xml = isXML( context ); - - if ( !xml && !seed ) { - if ( (match = rquickExpr.exec( selector )) ) { - // Speed-up: Sizzle("#ID") - if ( (m = match[1]) ) { - if ( nodeType === 9 ) { - elem = context.getElementById( m ); - // Check parentNode to catch when Blackberry 4.6 returns - // nodes that are no longer in the document #6963 - if ( elem && elem.parentNode ) { - // Handle the case where IE, Opera, and Webkit return items - // by name instead of ID - if ( elem.id === m ) { - results.push( elem ); - return results; - } - } else { - return results; - } - } else { - // Context is not a document - if ( context.ownerDocument && (elem = context.ownerDocument.getElementById( m )) && - contains( context, elem ) && elem.id === m ) { - results.push( elem ); - return results; - } - } - - // Speed-up: Sizzle("TAG") - } else if ( match[2] ) { - push.apply( results, slice.call(context.getElementsByTagName( selector ), 0) ); - return results; - - // Speed-up: Sizzle(".CLASS") - } else if ( (m = match[3]) && assertUsableClassName && context.getElementsByClassName ) { - push.apply( results, slice.call(context.getElementsByClassName( m ), 0) ); - return results; - } - } - } - - // All others - return select( selector.replace( rtrim, "$1" ), context, results, seed, xml ); -} - -Sizzle.matches = function( expr, elements ) { - return Sizzle( expr, null, null, elements ); -}; - -Sizzle.matchesSelector = function( elem, expr ) { - return Sizzle( expr, null, null, [ elem ] ).length > 0; -}; - -// Returns a function to use in pseudos for input types -function createInputPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === type; - }; -} - -// Returns a function to use in pseudos for buttons -function createButtonPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return (name === "input" || name === "button") && elem.type === type; - }; -} - -// Returns a function to use in pseudos for positionals -function createPositionalPseudo( fn ) { - return markFunction(function( argument ) { - argument = +argument; - return markFunction(function( seed, matches ) { - var j, - matchIndexes = fn( [], seed.length, argument ), - i = matchIndexes.length; - - // Match elements found at the specified indexes - while ( i-- ) { - if ( seed[ (j = matchIndexes[i]) ] ) { - seed[j] = !(matches[j] = seed[j]); - } - } - }); - }); -} - -/** - * Utility function for retrieving the text value of an array 
of DOM nodes - * @param {Array|Element} elem - */ -getText = Sizzle.getText = function( elem ) { - var node, - ret = "", - i = 0, - nodeType = elem.nodeType; - - if ( nodeType ) { - if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { - // Use textContent for elements - // innerText usage removed for consistency of new lines (see #11153) - if ( typeof elem.textContent === "string" ) { - return elem.textContent; - } else { - // Traverse its children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - ret += getText( elem ); - } - } - } else if ( nodeType === 3 || nodeType === 4 ) { - return elem.nodeValue; - } - // Do not include comment or processing instruction nodes - } else { - - // If no nodeType, this is expected to be an array - for ( ; (node = elem[i]); i++ ) { - // Do not traverse comment nodes - ret += getText( node ); - } - } - return ret; -}; - -isXML = Sizzle.isXML = function( elem ) { - // documentElement is verified for cases where it doesn't yet exist - // (such as loading iframes in IE - #4833) - var documentElement = elem && (elem.ownerDocument || elem).documentElement; - return documentElement ? documentElement.nodeName !== "HTML" : false; -}; - -// Element contains another -contains = Sizzle.contains = docElem.contains ? - function( a, b ) { - var adown = a.nodeType === 9 ? a.documentElement : a, - bup = b && b.parentNode; - return a === bup || !!( bup && bup.nodeType === 1 && adown.contains && adown.contains(bup) ); - } : - docElem.compareDocumentPosition ? - function( a, b ) { - return b && !!( a.compareDocumentPosition( b ) & 16 ); - } : - function( a, b ) { - while ( (b = b.parentNode) ) { - if ( b === a ) { - return true; - } - } - return false; - }; - -Sizzle.attr = function( elem, name ) { - var val, - xml = isXML( elem ); - - if ( !xml ) { - name = name.toLowerCase(); - } - if ( (val = Expr.attrHandle[ name ]) ) { - return val( elem ); - } - if ( xml || assertAttributes ) { - return elem.getAttribute( name ); - } - val = elem.getAttributeNode( name ); - return val ? - typeof elem[ name ] === "boolean" ? - elem[ name ] ? name : null : - val.specified ? val.value : null : - null; -}; - -Expr = Sizzle.selectors = { - - // Can be adjusted by the user - cacheLength: 50, - - createPseudo: markFunction, - - match: matchExpr, - - // IE6/7 return a modified href - attrHandle: assertHrefNotNormalized ? - {} : - { - "href": function( elem ) { - return elem.getAttribute( "href", 2 ); - }, - "type": function( elem ) { - return elem.getAttribute("type"); - } - }, - - find: { - "ID": assertGetIdNotName ? - function( id, context, xml ) { - if ( typeof context.getElementById !== strundefined && !xml ) { - var m = context.getElementById( id ); - // Check parentNode to catch when Blackberry 4.6 returns - // nodes that are no longer in the document #6963 - return m && m.parentNode ? [m] : []; - } - } : - function( id, context, xml ) { - if ( typeof context.getElementById !== strundefined && !xml ) { - var m = context.getElementById( id ); - - return m ? - m.id === id || typeof m.getAttributeNode !== strundefined && m.getAttributeNode("id").value === id ? - [m] : - undefined : - []; - } - }, - - "TAG": assertTagNameNoComments ? 
- function( tag, context ) { - if ( typeof context.getElementsByTagName !== strundefined ) { - return context.getElementsByTagName( tag ); - } - } : - function( tag, context ) { - var results = context.getElementsByTagName( tag ); - - // Filter out possible comments - if ( tag === "*" ) { - var elem, - tmp = [], - i = 0; - - for ( ; (elem = results[i]); i++ ) { - if ( elem.nodeType === 1 ) { - tmp.push( elem ); - } - } - - return tmp; - } - return results; - }, - - "NAME": assertUsableName && function( tag, context ) { - if ( typeof context.getElementsByName !== strundefined ) { - return context.getElementsByName( name ); - } - }, - - "CLASS": assertUsableClassName && function( className, context, xml ) { - if ( typeof context.getElementsByClassName !== strundefined && !xml ) { - return context.getElementsByClassName( className ); - } - } - }, - - relative: { - ">": { dir: "parentNode", first: true }, - " ": { dir: "parentNode" }, - "+": { dir: "previousSibling", first: true }, - "~": { dir: "previousSibling" } - }, - - preFilter: { - "ATTR": function( match ) { - match[1] = match[1].replace( rbackslash, "" ); - - // Move the given value to match[3] whether quoted or unquoted - match[3] = ( match[4] || match[5] || "" ).replace( rbackslash, "" ); - - if ( match[2] === "~=" ) { - match[3] = " " + match[3] + " "; - } - - return match.slice( 0, 4 ); - }, - - "CHILD": function( match ) { - /* matches from matchExpr["CHILD"] - 1 type (only|nth|...) - 2 argument (even|odd|\d*|\d*n([+-]\d+)?|...) - 3 xn-component of xn+y argument ([+-]?\d*n|) - 4 sign of xn-component - 5 x of xn-component - 6 sign of y-component - 7 y of y-component - */ - match[1] = match[1].toLowerCase(); - - if ( match[1] === "nth" ) { - // nth-child requires argument - if ( !match[2] ) { - Sizzle.error( match[0] ); - } - - // numeric x and y parameters for Expr.filter.CHILD - // remember that false/true cast respectively to 0/1 - match[3] = +( match[3] ? match[4] + (match[5] || 1) : 2 * ( match[2] === "even" || match[2] === "odd" ) ); - match[4] = +( ( match[6] + match[7] ) || match[2] === "odd" ); - - // other types prohibit arguments - } else if ( match[2] ) { - Sizzle.error( match[0] ); - } - - return match; - }, - - "PSEUDO": function( match ) { - var unquoted, excess; - if ( matchExpr["CHILD"].test( match[0] ) ) { - return null; - } - - if ( match[3] ) { - match[2] = match[3]; - } else if ( (unquoted = match[4]) ) { - // Only check arguments that contain a pseudo - if ( rpseudo.test(unquoted) && - // Get excess from tokenize (recursively) - (excess = tokenize( unquoted, true )) && - // advance to the next closing parenthesis - (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) { - - // excess is a negative index - unquoted = unquoted.slice( 0, excess ); - match[0] = match[0].slice( 0, excess ); - } - match[2] = unquoted; - } - - // Return only captures needed by the pseudo filter method (type and argument) - return match.slice( 0, 3 ); - } - }, - - filter: { - "ID": assertGetIdNotName ? 
- function( id ) { - id = id.replace( rbackslash, "" ); - return function( elem ) { - return elem.getAttribute("id") === id; - }; - } : - function( id ) { - id = id.replace( rbackslash, "" ); - return function( elem ) { - var node = typeof elem.getAttributeNode !== strundefined && elem.getAttributeNode("id"); - return node && node.value === id; - }; - }, - - "TAG": function( nodeName ) { - if ( nodeName === "*" ) { - return function() { return true; }; - } - nodeName = nodeName.replace( rbackslash, "" ).toLowerCase(); - - return function( elem ) { - return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; - }; - }, - - "CLASS": function( className ) { - var pattern = classCache[ expando ][ className + " " ]; - - return pattern || - (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) && - classCache( className, function( elem ) { - return pattern.test( elem.className || (typeof elem.getAttribute !== strundefined && elem.getAttribute("class")) || "" ); - }); - }, - - "ATTR": function( name, operator, check ) { - return function( elem, context ) { - var result = Sizzle.attr( elem, name ); - - if ( result == null ) { - return operator === "!="; - } - if ( !operator ) { - return true; - } - - result += ""; - - return operator === "=" ? result === check : - operator === "!=" ? result !== check : - operator === "^=" ? check && result.indexOf( check ) === 0 : - operator === "*=" ? check && result.indexOf( check ) > -1 : - operator === "$=" ? check && result.substr( result.length - check.length ) === check : - operator === "~=" ? ( " " + result + " " ).indexOf( check ) > -1 : - operator === "|=" ? result === check || result.substr( 0, check.length + 1 ) === check + "-" : - false; - }; - }, - - "CHILD": function( type, argument, first, last ) { - - if ( type === "nth" ) { - return function( elem ) { - var node, diff, - parent = elem.parentNode; - - if ( first === 1 && last === 0 ) { - return true; - } - - if ( parent ) { - diff = 0; - for ( node = parent.firstChild; node; node = node.nextSibling ) { - if ( node.nodeType === 1 ) { - diff++; - if ( elem === node ) { - break; - } - } - } - } - - // Incorporate the offset (or cast to NaN), then check against cycle size - diff -= last; - return diff === first || ( diff % first === 0 && diff / first >= 0 ); - }; - } - - return function( elem ) { - var node = elem; - - switch ( type ) { - case "only": - case "first": - while ( (node = node.previousSibling) ) { - if ( node.nodeType === 1 ) { - return false; - } - } - - if ( type === "first" ) { - return true; - } - - node = elem; - - /* falls through */ - case "last": - while ( (node = node.nextSibling) ) { - if ( node.nodeType === 1 ) { - return false; - } - } - - return true; - } - }; - }, - - "PSEUDO": function( pseudo, argument ) { - // pseudo-class names are case-insensitive - // http://www.w3.org/TR/selectors/#pseudo-classes - // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters - // Remember that setFilters inherits from pseudos - var args, - fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || - Sizzle.error( "unsupported pseudo: " + pseudo ); - - // The user may use createPseudo to indicate that - // arguments are needed to create the filter function - // just as Sizzle does - if ( fn[ expando ] ) { - return fn( argument ); - } - - // But maintain support for old signatures - if ( fn.length > 1 ) { - args = [ pseudo, pseudo, "", argument ]; - return Expr.setFilters.hasOwnProperty( 
pseudo.toLowerCase() ) ? - markFunction(function( seed, matches ) { - var idx, - matched = fn( seed, argument ), - i = matched.length; - while ( i-- ) { - idx = indexOf.call( seed, matched[i] ); - seed[ idx ] = !( matches[ idx ] = matched[i] ); - } - }) : - function( elem ) { - return fn( elem, 0, args ); - }; - } - - return fn; - } - }, - - pseudos: { - "not": markFunction(function( selector ) { - // Trim the selector passed to compile - // to avoid treating leading and trailing - // spaces as combinators - var input = [], - results = [], - matcher = compile( selector.replace( rtrim, "$1" ) ); - - return matcher[ expando ] ? - markFunction(function( seed, matches, context, xml ) { - var elem, - unmatched = matcher( seed, null, xml, [] ), - i = seed.length; - - // Match elements unmatched by `matcher` - while ( i-- ) { - if ( (elem = unmatched[i]) ) { - seed[i] = !(matches[i] = elem); - } - } - }) : - function( elem, context, xml ) { - input[0] = elem; - matcher( input, null, xml, results ); - return !results.pop(); - }; - }), - - "has": markFunction(function( selector ) { - return function( elem ) { - return Sizzle( selector, elem ).length > 0; - }; - }), - - "contains": markFunction(function( text ) { - return function( elem ) { - return ( elem.textContent || elem.innerText || getText( elem ) ).indexOf( text ) > -1; - }; - }), - - "enabled": function( elem ) { - return elem.disabled === false; - }, - - "disabled": function( elem ) { - return elem.disabled === true; - }, - - "checked": function( elem ) { - // In CSS3, :checked should return both checked and selected elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - var nodeName = elem.nodeName.toLowerCase(); - return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected); - }, - - "selected": function( elem ) { - // Accessing this property makes selected-by-default - // options in Safari work properly - if ( elem.parentNode ) { - elem.parentNode.selectedIndex; - } - - return elem.selected === true; - }, - - "parent": function( elem ) { - return !Expr.pseudos["empty"]( elem ); - }, - - "empty": function( elem ) { - // http://www.w3.org/TR/selectors/#empty-pseudo - // :empty is only affected by element nodes and content nodes(including text(3), cdata(4)), - // not comment, processing instructions, or others - // Thanks to Diego Perini for the nodeName shortcut - // Greater than "@" means alpha characters (specifically not starting with "#" or "?") - var nodeType; - elem = elem.firstChild; - while ( elem ) { - if ( elem.nodeName > "@" || (nodeType = elem.nodeType) === 3 || nodeType === 4 ) { - return false; - } - elem = elem.nextSibling; - } - return true; - }, - - "header": function( elem ) { - return rheader.test( elem.nodeName ); - }, - - "text": function( elem ) { - var type, attr; - // IE6 and 7 will map elem.type to 'text' for new HTML5 types (search, etc) - // use getAttribute instead to test this case - return elem.nodeName.toLowerCase() === "input" && - (type = elem.type) === "text" && - ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === type ); - }, - - // Input types - "radio": createInputPseudo("radio"), - "checkbox": createInputPseudo("checkbox"), - "file": createInputPseudo("file"), - "password": createInputPseudo("password"), - "image": createInputPseudo("image"), - - "submit": createButtonPseudo("submit"), - "reset": createButtonPseudo("reset"), - - "button": function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" 
&& elem.type === "button" || name === "button"; - }, - - "input": function( elem ) { - return rinputs.test( elem.nodeName ); - }, - - "focus": function( elem ) { - var doc = elem.ownerDocument; - return elem === doc.activeElement && (!doc.hasFocus || doc.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex); - }, - - "active": function( elem ) { - return elem === elem.ownerDocument.activeElement; - }, - - // Positional types - "first": createPositionalPseudo(function() { - return [ 0 ]; - }), - - "last": createPositionalPseudo(function( matchIndexes, length ) { - return [ length - 1 ]; - }), - - "eq": createPositionalPseudo(function( matchIndexes, length, argument ) { - return [ argument < 0 ? argument + length : argument ]; - }), - - "even": createPositionalPseudo(function( matchIndexes, length ) { - for ( var i = 0; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "odd": createPositionalPseudo(function( matchIndexes, length ) { - for ( var i = 1; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "lt": createPositionalPseudo(function( matchIndexes, length, argument ) { - for ( var i = argument < 0 ? argument + length : argument; --i >= 0; ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "gt": createPositionalPseudo(function( matchIndexes, length, argument ) { - for ( var i = argument < 0 ? argument + length : argument; ++i < length; ) { - matchIndexes.push( i ); - } - return matchIndexes; - }) - } -}; - -function siblingCheck( a, b, ret ) { - if ( a === b ) { - return ret; - } - - var cur = a.nextSibling; - - while ( cur ) { - if ( cur === b ) { - return -1; - } - - cur = cur.nextSibling; - } - - return 1; -} - -sortOrder = docElem.compareDocumentPosition ? - function( a, b ) { - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - return ( !a.compareDocumentPosition || !b.compareDocumentPosition ? - a.compareDocumentPosition : - a.compareDocumentPosition(b) & 4 - ) ? -1 : 1; - } : - function( a, b ) { - // The nodes are identical, we can exit early - if ( a === b ) { - hasDuplicate = true; - return 0; - - // Fallback to using sourceIndex (in IE) if it's available on both nodes - } else if ( a.sourceIndex && b.sourceIndex ) { - return a.sourceIndex - b.sourceIndex; - } - - var al, bl, - ap = [], - bp = [], - aup = a.parentNode, - bup = b.parentNode, - cur = aup; - - // If the nodes are siblings (or identical) we can do a quick check - if ( aup === bup ) { - return siblingCheck( a, b ); - - // If no parents were found then the nodes are disconnected - } else if ( !aup ) { - return -1; - - } else if ( !bup ) { - return 1; - } - - // Otherwise they're somewhere else in the tree so we need - // to build up a full list of the parentNodes for comparison - while ( cur ) { - ap.unshift( cur ); - cur = cur.parentNode; - } - - cur = bup; - - while ( cur ) { - bp.unshift( cur ); - cur = cur.parentNode; - } - - al = ap.length; - bl = bp.length; - - // Start walking down the tree looking for a discrepancy - for ( var i = 0; i < al && i < bl; i++ ) { - if ( ap[i] !== bp[i] ) { - return siblingCheck( ap[i], bp[i] ); - } - } - - // We ended someplace up the tree so do a sibling check - return i === al ? - siblingCheck( a, bp[i], -1 ) : - siblingCheck( ap[i], b, 1 ); - }; - -// Always assume the presence of duplicates if sort doesn't -// pass them to our comparison function (as in Google Chrome). 
-[0, 0].sort( sortOrder ); -baseHasDuplicate = !hasDuplicate; - -// Document sorting and removing duplicates -Sizzle.uniqueSort = function( results ) { - var elem, - duplicates = [], - i = 1, - j = 0; - - hasDuplicate = baseHasDuplicate; - results.sort( sortOrder ); - - if ( hasDuplicate ) { - for ( ; (elem = results[i]); i++ ) { - if ( elem === results[ i - 1 ] ) { - j = duplicates.push( i ); - } - } - while ( j-- ) { - results.splice( duplicates[ j ], 1 ); - } - } - - return results; -}; - -Sizzle.error = function( msg ) { - throw new Error( "Syntax error, unrecognized expression: " + msg ); -}; - -function tokenize( selector, parseOnly ) { - var matched, match, tokens, type, - soFar, groups, preFilters, - cached = tokenCache[ expando ][ selector + " " ]; - - if ( cached ) { - return parseOnly ? 0 : cached.slice( 0 ); - } - - soFar = selector; - groups = []; - preFilters = Expr.preFilter; - - while ( soFar ) { - - // Comma and first run - if ( !matched || (match = rcomma.exec( soFar )) ) { - if ( match ) { - // Don't consume trailing commas as valid - soFar = soFar.slice( match[0].length ) || soFar; - } - groups.push( tokens = [] ); - } - - matched = false; - - // Combinators - if ( (match = rcombinators.exec( soFar )) ) { - tokens.push( matched = new Token( match.shift() ) ); - soFar = soFar.slice( matched.length ); - - // Cast descendant combinators to space - matched.type = match[0].replace( rtrim, " " ); - } - - // Filters - for ( type in Expr.filter ) { - if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] || - (match = preFilters[ type ]( match ))) ) { - - tokens.push( matched = new Token( match.shift() ) ); - soFar = soFar.slice( matched.length ); - matched.type = type; - matched.matches = match; - } - } - - if ( !matched ) { - break; - } - } - - // Return the length of the invalid excess - // if we're just parsing - // Otherwise, throw an error or return tokens - return parseOnly ? - soFar.length : - soFar ? - Sizzle.error( selector ) : - // Cache the tokens - tokenCache( selector, groups ).slice( 0 ); -} - -function addCombinator( matcher, combinator, base ) { - var dir = combinator.dir, - checkNonElements = base && combinator.dir === "parentNode", - doneName = done++; - - return combinator.first ? - // Check against closest ancestor/preceding element - function( elem, context, xml ) { - while ( (elem = elem[ dir ]) ) { - if ( checkNonElements || elem.nodeType === 1 ) { - return matcher( elem, context, xml ); - } - } - } : - - // Check against all ancestor/preceding elements - function( elem, context, xml ) { - // We can't set arbitrary data on XML nodes, so they don't benefit from dir caching - if ( !xml ) { - var cache, - dirkey = dirruns + " " + doneName + " ", - cachedkey = dirkey + cachedruns; - while ( (elem = elem[ dir ]) ) { - if ( checkNonElements || elem.nodeType === 1 ) { - if ( (cache = elem[ expando ]) === cachedkey ) { - return elem.sizset; - } else if ( typeof cache === "string" && cache.indexOf(dirkey) === 0 ) { - if ( elem.sizset ) { - return elem; - } - } else { - elem[ expando ] = cachedkey; - if ( matcher( elem, context, xml ) ) { - elem.sizset = true; - return elem; - } - elem.sizset = false; - } - } - } - } else { - while ( (elem = elem[ dir ]) ) { - if ( checkNonElements || elem.nodeType === 1 ) { - if ( matcher( elem, context, xml ) ) { - return elem; - } - } - } - } - }; -} - -function elementMatcher( matchers ) { - return matchers.length > 1 ? 
- function( elem, context, xml ) { - var i = matchers.length; - while ( i-- ) { - if ( !matchers[i]( elem, context, xml ) ) { - return false; - } - } - return true; - } : - matchers[0]; -} - -function condense( unmatched, map, filter, context, xml ) { - var elem, - newUnmatched = [], - i = 0, - len = unmatched.length, - mapped = map != null; - - for ( ; i < len; i++ ) { - if ( (elem = unmatched[i]) ) { - if ( !filter || filter( elem, context, xml ) ) { - newUnmatched.push( elem ); - if ( mapped ) { - map.push( i ); - } - } - } - } - - return newUnmatched; -} - -function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { - if ( postFilter && !postFilter[ expando ] ) { - postFilter = setMatcher( postFilter ); - } - if ( postFinder && !postFinder[ expando ] ) { - postFinder = setMatcher( postFinder, postSelector ); - } - return markFunction(function( seed, results, context, xml ) { - var temp, i, elem, - preMap = [], - postMap = [], - preexisting = results.length, - - // Get initial elements from seed or context - elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ), - - // Prefilter to get matcher input, preserving a map for seed-results synchronization - matcherIn = preFilter && ( seed || !selector ) ? - condense( elems, preMap, preFilter, context, xml ) : - elems, - - matcherOut = matcher ? - // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, - postFinder || ( seed ? preFilter : preexisting || postFilter ) ? - - // ...intermediate processing is necessary - [] : - - // ...otherwise use results directly - results : - matcherIn; - - // Find primary matches - if ( matcher ) { - matcher( matcherIn, matcherOut, context, xml ); - } - - // Apply postFilter - if ( postFilter ) { - temp = condense( matcherOut, postMap ); - postFilter( temp, [], context, xml ); - - // Un-match failing elements by moving them back to matcherIn - i = temp.length; - while ( i-- ) { - if ( (elem = temp[i]) ) { - matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem); - } - } - } - - if ( seed ) { - if ( postFinder || preFilter ) { - if ( postFinder ) { - // Get the final matcherOut by condensing this intermediate into postFinder contexts - temp = []; - i = matcherOut.length; - while ( i-- ) { - if ( (elem = matcherOut[i]) ) { - // Restore matcherIn since elem is not yet a final match - temp.push( (matcherIn[i] = elem) ); - } - } - postFinder( null, (matcherOut = []), temp, xml ); - } - - // Move matched elements from seed to results to keep them synchronized - i = matcherOut.length; - while ( i-- ) { - if ( (elem = matcherOut[i]) && - (temp = postFinder ? indexOf.call( seed, elem ) : preMap[i]) > -1 ) { - - seed[temp] = !(results[temp] = elem); - } - } - } - - // Add elements to results, through postFinder if defined - } else { - matcherOut = condense( - matcherOut === results ? - matcherOut.splice( preexisting, matcherOut.length ) : - matcherOut - ); - if ( postFinder ) { - postFinder( null, results, matcherOut, xml ); - } else { - push.apply( results, matcherOut ); - } - } - }); -} - -function matcherFromTokens( tokens ) { - var checkContext, matcher, j, - len = tokens.length, - leadingRelative = Expr.relative[ tokens[0].type ], - implicitRelative = leadingRelative || Expr.relative[" "], - i = leadingRelative ? 
1 : 0, - - // The foundational matcher ensures that elements are reachable from top-level context(s) - matchContext = addCombinator( function( elem ) { - return elem === checkContext; - }, implicitRelative, true ), - matchAnyContext = addCombinator( function( elem ) { - return indexOf.call( checkContext, elem ) > -1; - }, implicitRelative, true ), - matchers = [ function( elem, context, xml ) { - return ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( - (checkContext = context).nodeType ? - matchContext( elem, context, xml ) : - matchAnyContext( elem, context, xml ) ); - } ]; - - for ( ; i < len; i++ ) { - if ( (matcher = Expr.relative[ tokens[i].type ]) ) { - matchers = [ addCombinator( elementMatcher( matchers ), matcher ) ]; - } else { - matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches ); - - // Return special upon seeing a positional matcher - if ( matcher[ expando ] ) { - // Find the next relative operator (if any) for proper handling - j = ++i; - for ( ; j < len; j++ ) { - if ( Expr.relative[ tokens[j].type ] ) { - break; - } - } - return setMatcher( - i > 1 && elementMatcher( matchers ), - i > 1 && tokens.slice( 0, i - 1 ).join("").replace( rtrim, "$1" ), - matcher, - i < j && matcherFromTokens( tokens.slice( i, j ) ), - j < len && matcherFromTokens( (tokens = tokens.slice( j )) ), - j < len && tokens.join("") - ); - } - matchers.push( matcher ); - } - } - - return elementMatcher( matchers ); -} - -function matcherFromGroupMatchers( elementMatchers, setMatchers ) { - var bySet = setMatchers.length > 0, - byElement = elementMatchers.length > 0, - superMatcher = function( seed, context, xml, results, expandContext ) { - var elem, j, matcher, - setMatched = [], - matchedCount = 0, - i = "0", - unmatched = seed && [], - outermost = expandContext != null, - contextBackup = outermostContext, - // We must always have either seed elements or context - elems = seed || byElement && Expr.find["TAG"]( "*", expandContext && context.parentNode || context ), - // Nested matchers should use non-integer dirruns - dirrunsUnique = (dirruns += contextBackup == null ? 
1 : Math.E); - - if ( outermost ) { - outermostContext = context !== document && context; - cachedruns = superMatcher.el; - } - - // Add elements passing elementMatchers directly to results - for ( ; (elem = elems[i]) != null; i++ ) { - if ( byElement && elem ) { - for ( j = 0; (matcher = elementMatchers[j]); j++ ) { - if ( matcher( elem, context, xml ) ) { - results.push( elem ); - break; - } - } - if ( outermost ) { - dirruns = dirrunsUnique; - cachedruns = ++superMatcher.el; - } - } - - // Track unmatched elements for set filters - if ( bySet ) { - // They will have gone through all possible matchers - if ( (elem = !matcher && elem) ) { - matchedCount--; - } - - // Lengthen the array for every element, matched or not - if ( seed ) { - unmatched.push( elem ); - } - } - } - - // Apply set filters to unmatched elements - matchedCount += i; - if ( bySet && i !== matchedCount ) { - for ( j = 0; (matcher = setMatchers[j]); j++ ) { - matcher( unmatched, setMatched, context, xml ); - } - - if ( seed ) { - // Reintegrate element matches to eliminate the need for sorting - if ( matchedCount > 0 ) { - while ( i-- ) { - if ( !(unmatched[i] || setMatched[i]) ) { - setMatched[i] = pop.call( results ); - } - } - } - - // Discard index placeholder values to get only actual matches - setMatched = condense( setMatched ); - } - - // Add matches to results - push.apply( results, setMatched ); - - // Seedless set matches succeeding multiple successful matchers stipulate sorting - if ( outermost && !seed && setMatched.length > 0 && - ( matchedCount + setMatchers.length ) > 1 ) { - - Sizzle.uniqueSort( results ); - } - } - - // Override manipulation of globals by nested matchers - if ( outermost ) { - dirruns = dirrunsUnique; - outermostContext = contextBackup; - } - - return unmatched; - }; - - superMatcher.el = 0; - return bySet ? 
- markFunction( superMatcher ) : - superMatcher; -} - -compile = Sizzle.compile = function( selector, group /* Internal Use Only */ ) { - var i, - setMatchers = [], - elementMatchers = [], - cached = compilerCache[ expando ][ selector + " " ]; - - if ( !cached ) { - // Generate a function of recursive functions that can be used to check each element - if ( !group ) { - group = tokenize( selector ); - } - i = group.length; - while ( i-- ) { - cached = matcherFromTokens( group[i] ); - if ( cached[ expando ] ) { - setMatchers.push( cached ); - } else { - elementMatchers.push( cached ); - } - } - - // Cache the compiled function - cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) ); - } - return cached; -}; - -function multipleContexts( selector, contexts, results ) { - var i = 0, - len = contexts.length; - for ( ; i < len; i++ ) { - Sizzle( selector, contexts[i], results ); - } - return results; -} - -function select( selector, context, results, seed, xml ) { - var i, tokens, token, type, find, - match = tokenize( selector ), - j = match.length; - - if ( !seed ) { - // Try to minimize operations if there is only one group - if ( match.length === 1 ) { - - // Take a shortcut and set the context if the root selector is an ID - tokens = match[0] = match[0].slice( 0 ); - if ( tokens.length > 2 && (token = tokens[0]).type === "ID" && - context.nodeType === 9 && !xml && - Expr.relative[ tokens[1].type ] ) { - - context = Expr.find["ID"]( token.matches[0].replace( rbackslash, "" ), context, xml )[0]; - if ( !context ) { - return results; - } - - selector = selector.slice( tokens.shift().length ); - } - - // Fetch a seed set for right-to-left matching - for ( i = matchExpr["POS"].test( selector ) ? -1 : tokens.length - 1; i >= 0; i-- ) { - token = tokens[i]; - - // Abort if we hit a combinator - if ( Expr.relative[ (type = token.type) ] ) { - break; - } - if ( (find = Expr.find[ type ]) ) { - // Search, expanding context for leading sibling combinators - if ( (seed = find( - token.matches[0].replace( rbackslash, "" ), - rsibling.test( tokens[0].type ) && context.parentNode || context, - xml - )) ) { - - // If seed is empty or no tokens remain, we can return early - tokens.splice( i, 1 ); - selector = seed.length && tokens.join(""); - if ( !selector ) { - push.apply( results, slice.call( seed, 0 ) ); - return results; - } - - break; - } - } - } - } - } - - // Compile and execute a filtering function - // Provide `match` to avoid retokenization if we modified the selector above - compile( selector, match )( - seed, - context, - xml, - results, - rsibling.test( selector ) - ); - return results; -} - -if ( document.querySelectorAll ) { - (function() { - var disconnectedMatch, - oldSelect = select, - rescape = /'|\\/g, - rattributeQuotes = /\=[\x20\t\r\n\f]*([^'"\]]*)[\x20\t\r\n\f]*\]/g, - - // qSa(:focus) reports false when true (Chrome 21), no need to also add to buggyMatches since matches checks buggyQSA - // A support test would require too much code (would include document ready) - rbuggyQSA = [ ":focus" ], - - // matchesSelector(:active) reports false when true (IE9/Opera 11.5) - // A support test would require too much code (would include document ready) - // just skip matchesSelector for :active - rbuggyMatches = [ ":active" ], - matches = docElem.matchesSelector || - docElem.mozMatchesSelector || - docElem.webkitMatchesSelector || - docElem.oMatchesSelector || - docElem.msMatchesSelector; - - // Build QSA regex - // Regex strategy adopted from Diego Perini 
- assert(function( div ) { - // Select is set to empty string on purpose - // This is to test IE's treatment of not explictly - // setting a boolean content attribute, - // since its presence should be enough - // http://bugs.jquery.com/ticket/12359 - div.innerHTML = "<select><option selected=''></option></select>"; - - // IE8 - Some boolean attributes are not treated correctly - if ( !div.querySelectorAll("[selected]").length ) { - rbuggyQSA.push( "\\[" + whitespace + "*(?:checked|disabled|ismap|multiple|readonly|selected|value)" ); - } - - // Webkit/Opera - :checked should return selected option elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - // IE8 throws error here (do not put tests after this one) - if ( !div.querySelectorAll(":checked").length ) { - rbuggyQSA.push(":checked"); - } - }); - - assert(function( div ) { - - // Opera 10-12/IE9 - ^= $= *= and empty values - // Should not select anything - div.innerHTML = "<p test=''></p>"; - if ( div.querySelectorAll("[test^='']").length ) { - rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:\"\"|'')" ); - } - - // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) - // IE8 throws error here (do not put tests after this one) - div.innerHTML = "<input type='hidden'/>"; - if ( !div.querySelectorAll(":enabled").length ) { - rbuggyQSA.push(":enabled", ":disabled"); - } - }); - - // rbuggyQSA always contains :focus, so no need for a length check - rbuggyQSA = /* rbuggyQSA.length && */ new RegExp( rbuggyQSA.join("|") ); - - select = function( selector, context, results, seed, xml ) { - // Only use querySelectorAll when not filtering, - // when this is not xml, - // and when no QSA bugs apply - if ( !seed && !xml && !rbuggyQSA.test( selector ) ) { - var groups, i, - old = true, - nid = expando, - newContext = context, - newSelector = context.nodeType === 9 && selector; - - // qSA works strangely on Element-rooted queries - // We can work around this by specifying an extra ID on the root - // and working up from there (Thanks to Andrew Dupont for the technique) - // IE 8 doesn't work on object elements - if ( context.nodeType === 1 && context.nodeName.toLowerCase() !== "object" ) { - groups = tokenize( selector ); - - if ( (old = context.getAttribute("id")) ) { - nid = old.replace( rescape, "\\$&" ); - } else { - context.setAttribute( "id", nid ); - } - nid = "[id='" + nid + "'] "; - - i = groups.length; - while ( i-- ) { - groups[i] = nid + groups[i].join(""); - } - newContext = rsibling.test( selector ) && context.parentNode || context; - newSelector = groups.join(","); - } - - if ( newSelector ) { - try { - push.apply( results, slice.call( newContext.querySelectorAll( - newSelector - ), 0 ) ); - return results; - } catch(qsaError) { - } finally { - if ( !old ) { - context.removeAttribute("id"); - } - } - } - } - - return oldSelect( selector, context, results, seed, xml ); - }; - - if ( matches ) { - assert(function( div ) { - // Check to see if it's possible to do matchesSelector - // on a disconnected node (IE 9) - disconnectedMatch = matches.call( div, "div" ); - - // This should fail with an exception - // Gecko does not error, returns false instead - try { - matches.call( div, "[test!='']:sizzle" ); - rbuggyMatches.push( "!=", pseudos ); - } catch ( e ) {} - }); - - // rbuggyMatches always contains :active and :focus, so no need for a length check - rbuggyMatches = /* rbuggyMatches.length && */ new RegExp( rbuggyMatches.join("|") ); - - Sizzle.matchesSelector = function( elem, expr ) { - // Make sure that attribute selectors are quoted - expr = expr.replace( rattributeQuotes, "='$1']" ); - - // rbuggyMatches always contains :active, so no need for an existence check - if ( !isXML( elem ) && !rbuggyMatches.test( expr ) && !rbuggyQSA.test( expr ) ) { - try { - var ret = matches.call( elem, expr ); - - // IE 9's matchesSelector returns false on disconnected nodes - if ( ret || disconnectedMatch || - // As well, disconnected nodes are said to be in a document - // fragment in IE 9 - elem.document && elem.document.nodeType !== 11 ) { - return ret; - } - } catch(e) {} - } - - return Sizzle( expr, null, null, [ elem ] ).length > 0; - }; - } - })(); -} - -// Deprecated -Expr.pseudos["nth"] = Expr.pseudos["eq"]; - -// Back-compat -function setFilters() {} -Expr.filters = setFilters.prototype = Expr.pseudos; -Expr.setFilters = new setFilters(); - -// Override sizzle attribute retrieval -Sizzle.attr = jQuery.attr; -jQuery.find = Sizzle; -jQuery.expr = Sizzle.selectors; 
-jQuery.expr[":"] = jQuery.expr.pseudos; -jQuery.unique = Sizzle.uniqueSort; -jQuery.text = Sizzle.getText; -jQuery.isXMLDoc = Sizzle.isXML; -jQuery.contains = Sizzle.contains; - - -})( window ); -var runtil = /Until$/, - rparentsprev = /^(?:parents|prev(?:Until|All))/, - isSimple = /^.[^:#\[\.,]*$/, - rneedsContext = jQuery.expr.match.needsContext, - // methods guaranteed to produce a unique set when starting from a unique set - guaranteedUnique = { - children: true, - contents: true, - next: true, - prev: true - }; - -jQuery.fn.extend({ - find: function( selector ) { - var i, l, length, n, r, ret, - self = this; - - if ( typeof selector !== "string" ) { - return jQuery( selector ).filter(function() { - for ( i = 0, l = self.length; i < l; i++ ) { - if ( jQuery.contains( self[ i ], this ) ) { - return true; - } - } - }); - } - - ret = this.pushStack( "", "find", selector ); - - for ( i = 0, l = this.length; i < l; i++ ) { - length = ret.length; - jQuery.find( selector, this[i], ret ); - - if ( i > 0 ) { - // Make sure that the results are unique - for ( n = length; n < ret.length; n++ ) { - for ( r = 0; r < length; r++ ) { - if ( ret[r] === ret[n] ) { - ret.splice(n--, 1); - break; - } - } - } - } - } - - return ret; - }, - - has: function( target ) { - var i, - targets = jQuery( target, this ), - len = targets.length; - - return this.filter(function() { - for ( i = 0; i < len; i++ ) { - if ( jQuery.contains( this, targets[i] ) ) { - return true; - } - } - }); - }, - - not: function( selector ) { - return this.pushStack( winnow(this, selector, false), "not", selector); - }, - - filter: function( selector ) { - return this.pushStack( winnow(this, selector, true), "filter", selector ); - }, - - is: function( selector ) { - return !!selector && ( - typeof selector === "string" ? - // If this is a positional/relative selector, check membership in the returned set - // so $("p:first").is("p:last") won't return true for a doc with two "p". - rneedsContext.test( selector ) ? - jQuery( selector, this.context ).index( this[0] ) >= 0 : - jQuery.filter( selector, this ).length > 0 : - this.filter( selector ).length > 0 ); - }, - - closest: function( selectors, context ) { - var cur, - i = 0, - l = this.length, - ret = [], - pos = rneedsContext.test( selectors ) || typeof selectors !== "string" ? - jQuery( selectors, context || this.context ) : - 0; - - for ( ; i < l; i++ ) { - cur = this[i]; - - while ( cur && cur.ownerDocument && cur !== context && cur.nodeType !== 11 ) { - if ( pos ? pos.index(cur) > -1 : jQuery.find.matchesSelector(cur, selectors) ) { - ret.push( cur ); - break; - } - cur = cur.parentNode; - } - } - - ret = ret.length > 1 ? jQuery.unique( ret ) : ret; - - return this.pushStack( ret, "closest", selectors ); - }, - - // Determine the position of an element within - // the matched set of elements - index: function( elem ) { - - // No argument, return index in parent - if ( !elem ) { - return ( this[0] && this[0].parentNode ) ? this.prevAll().length : -1; - } - - // index in selector - if ( typeof elem === "string" ) { - return jQuery.inArray( this[0], jQuery( elem ) ); - } - - // Locate the position of the desired element - return jQuery.inArray( - // If it receives a jQuery object, the first element is used - elem.jquery ? elem[0] : elem, this ); - }, - - add: function( selector, context ) { - var set = typeof selector === "string" ? - jQuery( selector, context ) : - jQuery.makeArray( selector && selector.nodeType ? 
[ selector ] : selector ), - all = jQuery.merge( this.get(), set ); - - return this.pushStack( isDisconnected( set[0] ) || isDisconnected( all[0] ) ? - all : - jQuery.unique( all ) ); - }, - - addBack: function( selector ) { - return this.add( selector == null ? - this.prevObject : this.prevObject.filter(selector) - ); - } -}); - -jQuery.fn.andSelf = jQuery.fn.addBack; - -// A painfully simple check to see if an element is disconnected -// from a document (should be improved, where feasible). -function isDisconnected( node ) { - return !node || !node.parentNode || node.parentNode.nodeType === 11; -} - -function sibling( cur, dir ) { - do { - cur = cur[ dir ]; - } while ( cur && cur.nodeType !== 1 ); - - return cur; -} - -jQuery.each({ - parent: function( elem ) { - var parent = elem.parentNode; - return parent && parent.nodeType !== 11 ? parent : null; - }, - parents: function( elem ) { - return jQuery.dir( elem, "parentNode" ); - }, - parentsUntil: function( elem, i, until ) { - return jQuery.dir( elem, "parentNode", until ); - }, - next: function( elem ) { - return sibling( elem, "nextSibling" ); - }, - prev: function( elem ) { - return sibling( elem, "previousSibling" ); - }, - nextAll: function( elem ) { - return jQuery.dir( elem, "nextSibling" ); - }, - prevAll: function( elem ) { - return jQuery.dir( elem, "previousSibling" ); - }, - nextUntil: function( elem, i, until ) { - return jQuery.dir( elem, "nextSibling", until ); - }, - prevUntil: function( elem, i, until ) { - return jQuery.dir( elem, "previousSibling", until ); - }, - siblings: function( elem ) { - return jQuery.sibling( ( elem.parentNode || {} ).firstChild, elem ); - }, - children: function( elem ) { - return jQuery.sibling( elem.firstChild ); - }, - contents: function( elem ) { - return jQuery.nodeName( elem, "iframe" ) ? - elem.contentDocument || elem.contentWindow.document : - jQuery.merge( [], elem.childNodes ); - } -}, function( name, fn ) { - jQuery.fn[ name ] = function( until, selector ) { - var ret = jQuery.map( this, fn, until ); - - if ( !runtil.test( name ) ) { - selector = until; - } - - if ( selector && typeof selector === "string" ) { - ret = jQuery.filter( selector, ret ); - } - - ret = this.length > 1 && !guaranteedUnique[ name ] ? jQuery.unique( ret ) : ret; - - if ( this.length > 1 && rparentsprev.test( name ) ) { - ret = ret.reverse(); - } - - return this.pushStack( ret, name, core_slice.call( arguments ).join(",") ); - }; -}); - -jQuery.extend({ - filter: function( expr, elems, not ) { - if ( not ) { - expr = ":not(" + expr + ")"; - } - - return elems.length === 1 ? - jQuery.find.matchesSelector(elems[0], expr) ? 
[ elems[0] ] : [] :
-      jQuery.find.matches(expr, elems);
-  },
-
-  dir: function( elem, dir, until ) {
-    var matched = [],
-      cur = elem[ dir ];
-
-    while ( cur && cur.nodeType !== 9 && (until === undefined || cur.nodeType !== 1 || !jQuery( cur ).is( until )) ) {
-      if ( cur.nodeType === 1 ) {
-        matched.push( cur );
-      }
-      cur = cur[dir];
-    }
-    return matched;
-  },
-
-  sibling: function( n, elem ) {
-    var r = [];
-
-    for ( ; n; n = n.nextSibling ) {
-      if ( n.nodeType === 1 && n !== elem ) {
-        r.push( n );
-      }
-    }
-
-    return r;
-  }
-});
-
-// Implement the identical functionality for filter and not
-function winnow( elements, qualifier, keep ) {
-
-  // Can't pass null or undefined to indexOf in Firefox 4
-  // Set to 0 to skip string check
-  qualifier = qualifier || 0;
-
-  if ( jQuery.isFunction( qualifier ) ) {
-    return jQuery.grep(elements, function( elem, i ) {
-      var retVal = !!qualifier.call( elem, i, elem );
-      return retVal === keep;
-    });
-
-  } else if ( qualifier.nodeType ) {
-    return jQuery.grep(elements, function( elem, i ) {
-      return ( elem === qualifier ) === keep;
-    });
-
-  } else if ( typeof qualifier === "string" ) {
-    var filtered = jQuery.grep(elements, function( elem ) {
-      return elem.nodeType === 1;
-    });
-
-    if ( isSimple.test( qualifier ) ) {
-      return jQuery.filter(qualifier, filtered, !keep);
-    } else {
-      qualifier = jQuery.filter( qualifier, filtered );
-    }
-  }
-
-  return jQuery.grep(elements, function( elem, i ) {
-    return ( jQuery.inArray( elem, qualifier ) >= 0 ) === keep;
-  });
-}
-function createSafeFragment( document ) {
-  var list = nodeNames.split( "|" ),
-    safeFrag = document.createDocumentFragment();
-
-  if ( safeFrag.createElement ) {
-    while ( list.length ) {
-      safeFrag.createElement(
-        list.pop()
-      );
-    }
-  }
-  return safeFrag;
-}
-
-var nodeNames = "abbr|article|aside|audio|bdi|canvas|data|datalist|details|figcaption|figure|footer|" +
-    "header|hgroup|mark|meter|nav|output|progress|section|summary|time|video",
-  rinlinejQuery = / jQuery\d+="(?:null|\d+)"/g,
-  rleadingWhitespace = /^\s+/,
-  rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/gi,
-  rtagName = /<([\w:]+)/,
-  rtbody = /<tbody/i,
-  rhtml = /<|&#?\w+;/,
-  rnoInnerhtml = /<(?:script|style|link)/i,
-  rnocache = /<(?:script|object|embed|option|style)/i,
-  rnoshimcache = new RegExp("<(?:" + nodeNames + ")[\\s/>]", "i"),
-  rcheckableType = /^(?:checkbox|radio)$/,
-  // checked="checked" or checked
-  rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i,
-  rscriptType = /\/(java|ecma)script/i,
-  rcleanScript = /^\s*<!(?:\[CDATA\[|\-\-)|[\]\-]{2}>\s*$/g,
-  wrapMap = {
-    option: [ 1, "<select multiple='multiple'>", "</select>" ],
-    legend: [ 1, "<fieldset>", "</fieldset>" ],
-    thead: [ 1, "<table>", "</table>" ],
-    tr: [ 2, "<table><tbody>", "</tbody></table>" ],
-    td: [ 3, "<table><tbody><tr>", "</tr></tbody></table>" ],
-    col: [ 2, "<table><tbody></tbody><colgroup>", "</colgroup></table>" ],
-    area: [ 1, "<map>", "</map>" ],
-    _default: [ 0, "", "" ]
-  },
-  safeFragment = createSafeFragment( document ),
-  fragmentDiv = safeFragment.appendChild( document.createElement("div") );
-
-wrapMap.optgroup = wrapMap.option;
-wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead;
-wrapMap.th = wrapMap.td;
-
-// IE6-8 can't serialize link, script, style, or any html5 (NoScope) tags,
-// unless wrapped in a div with non-breaking characters in front of it.
-if ( !jQuery.support.htmlSerialize ) {
-  wrapMap._default = [ 1, "X<div>", "</div>
" ]; -} - -jQuery.fn.extend({ - text: function( value ) { - return jQuery.access( this, function( value ) { - return value === undefined ? - jQuery.text( this ) : - this.empty().append( ( this[0] && this[0].ownerDocument || document ).createTextNode( value ) ); - }, null, value, arguments.length ); - }, - - wrapAll: function( html ) { - if ( jQuery.isFunction( html ) ) { - return this.each(function(i) { - jQuery(this).wrapAll( html.call(this, i) ); - }); - } - - if ( this[0] ) { - // The elements to wrap the target around - var wrap = jQuery( html, this[0].ownerDocument ).eq(0).clone(true); - - if ( this[0].parentNode ) { - wrap.insertBefore( this[0] ); - } - - wrap.map(function() { - var elem = this; - - while ( elem.firstChild && elem.firstChild.nodeType === 1 ) { - elem = elem.firstChild; - } - - return elem; - }).append( this ); - } - - return this; - }, - - wrapInner: function( html ) { - if ( jQuery.isFunction( html ) ) { - return this.each(function(i) { - jQuery(this).wrapInner( html.call(this, i) ); - }); - } - - return this.each(function() { - var self = jQuery( this ), - contents = self.contents(); - - if ( contents.length ) { - contents.wrapAll( html ); - - } else { - self.append( html ); - } - }); - }, - - wrap: function( html ) { - var isFunction = jQuery.isFunction( html ); - - return this.each(function(i) { - jQuery( this ).wrapAll( isFunction ? html.call(this, i) : html ); - }); - }, - - unwrap: function() { - return this.parent().each(function() { - if ( !jQuery.nodeName( this, "body" ) ) { - jQuery( this ).replaceWith( this.childNodes ); - } - }).end(); - }, - - append: function() { - return this.domManip(arguments, true, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 ) { - this.appendChild( elem ); - } - }); - }, - - prepend: function() { - return this.domManip(arguments, true, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 ) { - this.insertBefore( elem, this.firstChild ); - } - }); - }, - - before: function() { - if ( !isDisconnected( this[0] ) ) { - return this.domManip(arguments, false, function( elem ) { - this.parentNode.insertBefore( elem, this ); - }); - } - - if ( arguments.length ) { - var set = jQuery.clean( arguments ); - return this.pushStack( jQuery.merge( set, this ), "before", this.selector ); - } - }, - - after: function() { - if ( !isDisconnected( this[0] ) ) { - return this.domManip(arguments, false, function( elem ) { - this.parentNode.insertBefore( elem, this.nextSibling ); - }); - } - - if ( arguments.length ) { - var set = jQuery.clean( arguments ); - return this.pushStack( jQuery.merge( this, set ), "after", this.selector ); - } - }, - - // keepData is for internal use only--do not document - remove: function( selector, keepData ) { - var elem, - i = 0; - - for ( ; (elem = this[i]) != null; i++ ) { - if ( !selector || jQuery.filter( selector, [ elem ] ).length ) { - if ( !keepData && elem.nodeType === 1 ) { - jQuery.cleanData( elem.getElementsByTagName("*") ); - jQuery.cleanData( [ elem ] ); - } - - if ( elem.parentNode ) { - elem.parentNode.removeChild( elem ); - } - } - } - - return this; - }, - - empty: function() { - var elem, - i = 0; - - for ( ; (elem = this[i]) != null; i++ ) { - // Remove element nodes and prevent memory leaks - if ( elem.nodeType === 1 ) { - jQuery.cleanData( elem.getElementsByTagName("*") ); - } - - // Remove any remaining nodes - while ( elem.firstChild ) { - elem.removeChild( elem.firstChild ); - } - } - - return this; - }, - - clone: function( dataAndEvents, 
deepDataAndEvents ) {
-    dataAndEvents = dataAndEvents == null ? false : dataAndEvents;
-    deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents;
-
-    return this.map( function () {
-      return jQuery.clone( this, dataAndEvents, deepDataAndEvents );
-    });
-  },
-
-  html: function( value ) {
-    return jQuery.access( this, function( value ) {
-      var elem = this[0] || {},
-        i = 0,
-        l = this.length;
-
-      if ( value === undefined ) {
-        return elem.nodeType === 1 ?
-          elem.innerHTML.replace( rinlinejQuery, "" ) :
-          undefined;
-      }
-
-      // See if we can take a shortcut and just use innerHTML
-      if ( typeof value === "string" && !rnoInnerhtml.test( value ) &&
-        ( jQuery.support.htmlSerialize || !rnoshimcache.test( value ) ) &&
-        ( jQuery.support.leadingWhitespace || !rleadingWhitespace.test( value ) ) &&
-        !wrapMap[ ( rtagName.exec( value ) || ["", ""] )[1].toLowerCase() ] ) {
-
-        value = value.replace( rxhtmlTag, "<$1></$2>" );
-
-        try {
-          for (; i < l; i++ ) {
-            // Remove element nodes and prevent memory leaks
-            elem = this[i] || {};
-            if ( elem.nodeType === 1 ) {
-              jQuery.cleanData( elem.getElementsByTagName( "*" ) );
-              elem.innerHTML = value;
-            }
-          }
-
-          elem = 0;
-
-        // If using innerHTML throws an exception, use the fallback method
-        } catch(e) {}
-      }
-
-      if ( elem ) {
-        this.empty().append( value );
-      }
-    }, null, value, arguments.length );
-  },
-
-  replaceWith: function( value ) {
-    if ( !isDisconnected( this[0] ) ) {
-      // Make sure that the elements are removed from the DOM before they are inserted
-      // this can help fix replacing a parent with child elements
-      if ( jQuery.isFunction( value ) ) {
-        return this.each(function(i) {
-          var self = jQuery(this), old = self.html();
-          self.replaceWith( value.call( this, i, old ) );
-        });
-      }
-
-      if ( typeof value !== "string" ) {
-        value = jQuery( value ).detach();
-      }
-
-      return this.each(function() {
-        var next = this.nextSibling,
-          parent = this.parentNode;
-
-        jQuery( this ).remove();
-
-        if ( next ) {
-          jQuery(next).before( value );
-        } else {
-          jQuery(parent).append( value );
-        }
-      });
-    }
-
-    return this.length ?
-      this.pushStack( jQuery(jQuery.isFunction(value) ? value() : value), "replaceWith", value ) :
-      this;
-  },
-
-  detach: function( selector ) {
-    return this.remove( selector, true );
-  },
-
-  domManip: function( args, table, callback ) {
-
-    // Flatten any nested arrays
-    args = [].concat.apply( [], args );
-
-    var results, first, fragment, iNoClone,
-      i = 0,
-      value = args[0],
-      scripts = [],
-      l = this.length;
-
-    // We can't cloneNode fragments that contain checked, in WebKit
-    if ( !jQuery.support.checkClone && l > 1 && typeof value === "string" && rchecked.test( value ) ) {
-      return this.each(function() {
-        jQuery(this).domManip( args, table, callback );
-      });
-    }
-
-    if ( jQuery.isFunction(value) ) {
-      return this.each(function(i) {
-        var self = jQuery(this);
-        args[0] = value.call( this, i, table ? self.html() : undefined );
-        self.domManip( args, table, callback );
-      });
-    }
-
-    if ( this[0] ) {
-      results = jQuery.buildFragment( args, this, scripts );
-      fragment = results.fragment;
-      first = fragment.firstChild;
-
-      if ( fragment.childNodes.length === 1 ) {
-        fragment = first;
-      }
-
-      if ( first ) {
-        table = table && jQuery.nodeName( first, "tr" );
-
-        // Use the original fragment for the last item instead of the first because it can end up
-        // being emptied incorrectly in certain situations (#8070).
-        // Fragments from the fragment cache must always be cloned and never used in place.
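// Editorial note, not part of the deleted file: how the iNoClone guard in
// the loop below behaves. When the fragment came from jQuery.fragments it
// is marked cacheable, so iNoClone is the boolean true, which never
// strict-equals the numeric loop index i -- every insertion point therefore
// receives a fresh jQuery.clone() of the fragment. For a one-off fragment,
// iNoClone is the last index l - 1, letting the final insertion consume the
// original fragment in place and saving one deep clone.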
- for ( iNoClone = results.cacheable || l - 1; i < l; i++ ) { - callback.call( - table && jQuery.nodeName( this[i], "table" ) ? - findOrAppend( this[i], "tbody" ) : - this[i], - i === iNoClone ? - fragment : - jQuery.clone( fragment, true, true ) - ); - } - } - - // Fix #11809: Avoid leaking memory - fragment = first = null; - - if ( scripts.length ) { - jQuery.each( scripts, function( i, elem ) { - if ( elem.src ) { - if ( jQuery.ajax ) { - jQuery.ajax({ - url: elem.src, - type: "GET", - dataType: "script", - async: false, - global: false, - "throws": true - }); - } else { - jQuery.error("no ajax"); - } - } else { - jQuery.globalEval( ( elem.text || elem.textContent || elem.innerHTML || "" ).replace( rcleanScript, "" ) ); - } - - if ( elem.parentNode ) { - elem.parentNode.removeChild( elem ); - } - }); - } - } - - return this; - } -}); - -function findOrAppend( elem, tag ) { - return elem.getElementsByTagName( tag )[0] || elem.appendChild( elem.ownerDocument.createElement( tag ) ); -} - -function cloneCopyEvent( src, dest ) { - - if ( dest.nodeType !== 1 || !jQuery.hasData( src ) ) { - return; - } - - var type, i, l, - oldData = jQuery._data( src ), - curData = jQuery._data( dest, oldData ), - events = oldData.events; - - if ( events ) { - delete curData.handle; - curData.events = {}; - - for ( type in events ) { - for ( i = 0, l = events[ type ].length; i < l; i++ ) { - jQuery.event.add( dest, type, events[ type ][ i ] ); - } - } - } - - // make the cloned public data object a copy from the original - if ( curData.data ) { - curData.data = jQuery.extend( {}, curData.data ); - } -} - -function cloneFixAttributes( src, dest ) { - var nodeName; - - // We do not need to do anything for non-Elements - if ( dest.nodeType !== 1 ) { - return; - } - - // clearAttributes removes the attributes, which we don't want, - // but also removes the attachEvent events, which we *do* want - if ( dest.clearAttributes ) { - dest.clearAttributes(); - } - - // mergeAttributes, in contrast, only merges back on the - // original attributes, not the events - if ( dest.mergeAttributes ) { - dest.mergeAttributes( src ); - } - - nodeName = dest.nodeName.toLowerCase(); - - if ( nodeName === "object" ) { - // IE6-10 improperly clones children of object elements using classid. - // IE10 throws NoModificationAllowedError if parent is null, #12132. - if ( dest.parentNode ) { - dest.outerHTML = src.outerHTML; - } - - // This path appears unavoidable for IE9. When cloning an object - // element in IE9, the outerHTML strategy above is not sufficient. - // If the src has innerHTML and the destination does not, - // copy the src.innerHTML into the dest.innerHTML. #10324 - if ( jQuery.support.html5Clone && (src.innerHTML && !jQuery.trim(dest.innerHTML)) ) { - dest.innerHTML = src.innerHTML; - } - - } else if ( nodeName === "input" && rcheckableType.test( src.type ) ) { - // IE6-8 fails to persist the checked state of a cloned checkbox - // or radio button. 
Worse, IE6-7 fail to give the cloned element
-    // a checked appearance if the defaultChecked value isn't also set
-
-    dest.defaultChecked = dest.checked = src.checked;
-
-    // IE6-7 get confused and end up setting the value of a cloned
-    // checkbox/radio button to an empty string instead of "on"
-    if ( dest.value !== src.value ) {
-      dest.value = src.value;
-    }
-
-  // IE6-8 fails to return the selected option to the default selected
-  // state when cloning options
-  } else if ( nodeName === "option" ) {
-    dest.selected = src.defaultSelected;
-
-  // IE6-8 fails to set the defaultValue to the correct value when
-  // cloning other types of input fields
-  } else if ( nodeName === "input" || nodeName === "textarea" ) {
-    dest.defaultValue = src.defaultValue;
-
-  // IE blanks contents when cloning scripts
-  } else if ( nodeName === "script" && dest.text !== src.text ) {
-    dest.text = src.text;
-  }
-
-  // Event data gets referenced instead of copied if the expando
-  // gets copied too
-  dest.removeAttribute( jQuery.expando );
-}
-
-jQuery.buildFragment = function( args, context, scripts ) {
-  var fragment, cacheable, cachehit,
-    first = args[ 0 ];
-
-  // Set context from what may come in as undefined or a jQuery collection or a node
-  // Updated to fix #12266 where accessing context[0] could throw an exception in IE9/10 &
-  // also doubles as fix for #8950 where plain objects caused createDocumentFragment exception
-  context = context || document;
-  context = !context.nodeType && context[0] || context;
-  context = context.ownerDocument || context;
-
-  // Only cache "small" (1/2 KB) HTML strings that are associated with the main document
-  // Cloning options loses the selected state, so don't cache them
-  // IE 6 doesn't like it when you put <object> or <embed> elements in a fragment
-  // Also, WebKit does not clone 'checked' attributes on cloneNode, so don't cache
-  // Lastly, IE6,7,8 will not correctly reuse cached fragments that were created from unknown elems #10501
-  if ( args.length === 1 && typeof first === "string" && first.length < 512 && context === document &&
-    first.charAt(0) === "<" && !rnocache.test( first ) &&
-    (jQuery.support.checkClone || !rchecked.test( first )) &&
-    (jQuery.support.html5Clone || !rnoshimcache.test( first )) ) {
-
-    // Mark cacheable and look for a hit
-    cacheable = true;
-    fragment = jQuery.fragments[ first ];
-    cachehit = fragment !== undefined;
-  }
-
-  if ( !fragment ) {
-    fragment = context.createDocumentFragment();
-    jQuery.clean( args, context, fragment, scripts );
-
-    // Update the cache, but only store false
-    // unless this is a second parsing of the same content
-    if ( cacheable ) {
-      jQuery.fragments[ first ] = cachehit && fragment;
-    }
-  }
-
-  return { fragment: fragment, cacheable: cacheable };
-};
-
-jQuery.fragments = {};
-
-jQuery.each({
-  appendTo: "append",
-  prependTo: "prepend",
-  insertBefore: "before",
-  insertAfter: "after",
-  replaceAll: "replaceWith"
-}, function( name, original ) {
-  jQuery.fn[ name ] = function( selector ) {
-    var elems,
-      i = 0,
-      ret = [],
-      insert = jQuery( selector ),
-      l = insert.length,
-      parent = this.length === 1 && this[0].parentNode;
-
-    if ( (parent == null || parent && parent.nodeType === 11 && parent.childNodes.length === 1) && l === 1 ) {
-      insert[ original ]( this[0] );
-      return this;
-    } else {
-      for ( ; i < l; i++ ) {
-        elems = ( i > 0 ? this.clone(true) : this ).get();
-        jQuery( insert[i] )[ original ]( elems );
-        ret = ret.concat( elems );
-      }
-
-      return this.pushStack( ret, name, insert.selector );
-    }
-  };
-});
-
-function getAll( elem ) {
-  if ( typeof elem.getElementsByTagName !== "undefined" ) {
-    return elem.getElementsByTagName( "*" );
-
-  } else if ( typeof elem.querySelectorAll !== "undefined" ) {
-    return elem.querySelectorAll( "*" );
-
-  } else {
-    return [];
-  }
-}
-
-// Used in clean, fixes the defaultChecked property
-function fixDefaultChecked( elem ) {
-  if ( rcheckableType.test( elem.type ) ) {
-    elem.defaultChecked = elem.checked;
-  }
-}
-
-jQuery.extend({
-  clone: function( elem, dataAndEvents, deepDataAndEvents ) {
-    var srcElements,
-      destElements,
-      i,
-      clone;
-
-    if ( jQuery.support.html5Clone || jQuery.isXMLDoc(elem) || !rnoshimcache.test( "<" + elem.nodeName + ">" ) ) {
-      clone = elem.cloneNode( true );
-
-    // IE<=8 does not properly clone detached, unknown element nodes
-    } else {
-      fragmentDiv.innerHTML = elem.outerHTML;
-      fragmentDiv.removeChild( clone = fragmentDiv.firstChild );
-    }
-
-    if ( (!jQuery.support.noCloneEvent || !jQuery.support.noCloneChecked) &&
-        (elem.nodeType === 1 || elem.nodeType === 11) && !jQuery.isXMLDoc(elem) ) {
-      // IE copies events bound via attachEvent when using cloneNode.
-      // Calling detachEvent on the clone will also remove the events
-      // from the original. In order to get around this, we use some
-      // proprietary methods to clear the events. Thanks to MooTools
-      // guys for this hotness.
-
-      cloneFixAttributes( elem, clone );
-
-      // Using Sizzle here is crazy slow, so we use getElementsByTagName instead
-      srcElements = getAll( elem );
-      destElements = getAll( clone );
-
-      // Weird iteration because IE will replace the length property
-      // with an element if you are cloning the body and one of the
-      // elements on the page has a name or id of "length"
-      for ( i = 0; srcElements[i]; ++i ) {
-        // Ensure that the destination node is not null; Fixes #9587
-        if ( destElements[i] ) {
-          cloneFixAttributes( srcElements[i], destElements[i] );
-        }
-      }
-    }
-
-    // Copy the events from the original to the clone
-    if ( dataAndEvents ) {
-      cloneCopyEvent( elem, clone );
-
-      if ( deepDataAndEvents ) {
-        srcElements = getAll( elem );
-        destElements = getAll( clone );
-
-        for ( i = 0; srcElements[i]; ++i ) {
-          cloneCopyEvent( srcElements[i], destElements[i] );
-        }
-      }
-    }
-
-    srcElements = destElements = null;
-
-    // Return the cloned set
-    return clone;
-  },
-
-  clean: function( elems, context, fragment, scripts ) {
-    var i, j, elem, tag, wrap, depth, div, hasBody, tbody, len, handleScript, jsTags,
-      safe = context === document && safeFragment,
-      ret = [];
-
-    // Ensure that context is a document
-    if ( !context || typeof context.createDocumentFragment === "undefined" ) {
-      context = document;
-    }
-
-    // Use the already-created safe fragment if context permits
-    for ( i = 0; (elem = elems[i]) != null; i++ ) {
-      if ( typeof elem === "number" ) {
-        elem += "";
-      }
-
-      if ( !elem ) {
-        continue;
-      }
-
-      // Convert html string into DOM nodes
-      if ( typeof elem === "string" ) {
-        if ( !rhtml.test( elem ) ) {
-          elem = context.createTextNode( elem );
-        } else {
-          // Ensure a safe container in which to render the html
-          safe = safe || createSafeFragment( context );
-          div = context.createElement("div");
-          safe.appendChild( div );
-
-          // Fix "XHTML"-style tags in all browsers
-          elem = elem.replace( rxhtmlTag, "<$1></$2>" );
-
-          // Go to html and back, then peel off extra wrappers
-          tag = ( rtagName.exec( elem ) || ["", ""] )[1].toLowerCase();
-          wrap = wrapMap[ tag ] || wrapMap._default;
-          depth = wrap[0];
-          div.innerHTML = wrap[1] + elem + wrap[2];
-
-          // Move to the right depth
-          while ( depth-- ) {
-            div = div.lastChild;
-          }
-
-          // Remove IE's autoinserted <tbody> from table fragments
-          if ( !jQuery.support.tbody ) {
-
-            // String was a <table>, *may* have spurious <tbody>
-            hasBody = rtbody.test(elem);
-            tbody = tag === "table" && !hasBody ?
-              div.firstChild && div.firstChild.childNodes :
-
-              // String was a bare <thead> or <tfoot>
-              wrap[1] === "<table>
" && !hasBody ? - div.childNodes : - []; - - for ( j = tbody.length - 1; j >= 0 ; --j ) { - if ( jQuery.nodeName( tbody[ j ], "tbody" ) && !tbody[ j ].childNodes.length ) { - tbody[ j ].parentNode.removeChild( tbody[ j ] ); - } - } - } - - // IE completely kills leading whitespace when innerHTML is used - if ( !jQuery.support.leadingWhitespace && rleadingWhitespace.test( elem ) ) { - div.insertBefore( context.createTextNode( rleadingWhitespace.exec(elem)[0] ), div.firstChild ); - } - - elem = div.childNodes; - - // Take out of fragment container (we need a fresh div each time) - div.parentNode.removeChild( div ); - } - } - - if ( elem.nodeType ) { - ret.push( elem ); - } else { - jQuery.merge( ret, elem ); - } - } - - // Fix #11356: Clear elements from safeFragment - if ( div ) { - elem = div = safe = null; - } - - // Reset defaultChecked for any radios and checkboxes - // about to be appended to the DOM in IE 6/7 (#8060) - if ( !jQuery.support.appendChecked ) { - for ( i = 0; (elem = ret[i]) != null; i++ ) { - if ( jQuery.nodeName( elem, "input" ) ) { - fixDefaultChecked( elem ); - } else if ( typeof elem.getElementsByTagName !== "undefined" ) { - jQuery.grep( elem.getElementsByTagName("input"), fixDefaultChecked ); - } - } - } - - // Append elements to a provided document fragment - if ( fragment ) { - // Special handling of each script element - handleScript = function( elem ) { - // Check if we consider it executable - if ( !elem.type || rscriptType.test( elem.type ) ) { - // Detach the script and store it in the scripts array (if provided) or the fragment - // Return truthy to indicate that it has been handled - return scripts ? - scripts.push( elem.parentNode ? elem.parentNode.removeChild( elem ) : elem ) : - fragment.appendChild( elem ); - } - }; - - for ( i = 0; (elem = ret[i]) != null; i++ ) { - // Check if we're done after handling an executable script - if ( !( jQuery.nodeName( elem, "script" ) && handleScript( elem ) ) ) { - // Append to fragment and handle embedded scripts - fragment.appendChild( elem ); - if ( typeof elem.getElementsByTagName !== "undefined" ) { - // handleScript alters the DOM, so use jQuery.merge to ensure snapshot iteration - jsTags = jQuery.grep( jQuery.merge( [], elem.getElementsByTagName("script") ), handleScript ); - - // Splice the scripts into ret after their former ancestor and advance our index beyond them - ret.splice.apply( ret, [i + 1, 0].concat( jsTags ) ); - i += jsTags.length; - } - } - } - } - - return ret; - }, - - cleanData: function( elems, /* internal */ acceptData ) { - var data, id, elem, type, - i = 0, - internalKey = jQuery.expando, - cache = jQuery.cache, - deleteExpando = jQuery.support.deleteExpando, - special = jQuery.event.special; - - for ( ; (elem = elems[i]) != null; i++ ) { - - if ( acceptData || jQuery.acceptData( elem ) ) { - - id = elem[ internalKey ]; - data = id && cache[ id ]; - - if ( data ) { - if ( data.events ) { - for ( type in data.events ) { - if ( special[ type ] ) { - jQuery.event.remove( elem, type ); - - // This is a shortcut to avoid jQuery.event.remove's overhead - } else { - jQuery.removeEvent( elem, type, data.handle ); - } - } - } - - // Remove cache only if it was not already removed by jQuery.event.remove - if ( cache[ id ] ) { - - delete cache[ id ]; - - // IE does not allow us to delete expando properties from nodes, - // nor does it have a removeAttribute function on Document nodes; - // we must handle all of these cases - if ( deleteExpando ) { - delete elem[ internalKey ]; - - } else if ( 
elem.removeAttribute ) { - elem.removeAttribute( internalKey ); - - } else { - elem[ internalKey ] = null; - } - - jQuery.deletedIds.push( id ); - } - } - } - } - } -}); -// Limit scope pollution from any deprecated API -(function() { - -var matched, browser; - -// Use of jQuery.browser is frowned upon. -// More details: http://api.jquery.com/jQuery.browser -// jQuery.uaMatch maintained for back-compat -jQuery.uaMatch = function( ua ) { - ua = ua.toLowerCase(); - - var match = /(chrome)[ \/]([\w.]+)/.exec( ua ) || - /(webkit)[ \/]([\w.]+)/.exec( ua ) || - /(opera)(?:.*version|)[ \/]([\w.]+)/.exec( ua ) || - /(msie) ([\w.]+)/.exec( ua ) || - ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec( ua ) || - []; - - return { - browser: match[ 1 ] || "", - version: match[ 2 ] || "0" - }; -}; - -matched = jQuery.uaMatch( navigator.userAgent ); -browser = {}; - -if ( matched.browser ) { - browser[ matched.browser ] = true; - browser.version = matched.version; -} - -// Chrome is Webkit, but Webkit is also Safari. -if ( browser.chrome ) { - browser.webkit = true; -} else if ( browser.webkit ) { - browser.safari = true; -} - -jQuery.browser = browser; - -jQuery.sub = function() { - function jQuerySub( selector, context ) { - return new jQuerySub.fn.init( selector, context ); - } - jQuery.extend( true, jQuerySub, this ); - jQuerySub.superclass = this; - jQuerySub.fn = jQuerySub.prototype = this(); - jQuerySub.fn.constructor = jQuerySub; - jQuerySub.sub = this.sub; - jQuerySub.fn.init = function init( selector, context ) { - if ( context && context instanceof jQuery && !(context instanceof jQuerySub) ) { - context = jQuerySub( context ); - } - - return jQuery.fn.init.call( this, selector, context, rootjQuerySub ); - }; - jQuerySub.fn.init.prototype = jQuerySub.fn; - var rootjQuerySub = jQuerySub(document); - return jQuerySub; -}; - -})(); -var curCSS, iframe, iframeDoc, - ralpha = /alpha\([^)]*\)/i, - ropacity = /opacity=([^)]*)/, - rposition = /^(top|right|bottom|left)$/, - // swappable if display is none or starts with table except "table", "table-cell", or "table-caption" - // see here for display values: https://developer.mozilla.org/en-US/docs/CSS/display - rdisplayswap = /^(none|table(?!-c[ea]).+)/, - rmargin = /^margin/, - rnumsplit = new RegExp( "^(" + core_pnum + ")(.*)$", "i" ), - rnumnonpx = new RegExp( "^(" + core_pnum + ")(?!px)[a-z%]+$", "i" ), - rrelNum = new RegExp( "^([-+])=(" + core_pnum + ")", "i" ), - elemdisplay = { BODY: "block" }, - - cssShow = { position: "absolute", visibility: "hidden", display: "block" }, - cssNormalTransform = { - letterSpacing: 0, - fontWeight: 400 - }, - - cssExpand = [ "Top", "Right", "Bottom", "Left" ], - cssPrefixes = [ "Webkit", "O", "Moz", "ms" ], - - eventsToggle = jQuery.fn.toggle; - -// return a css property mapped to a potentially vendor prefixed property -function vendorPropName( style, name ) { - - // shortcut for names that are not vendor prefixed - if ( name in style ) { - return name; - } - - // check for vendor prefixed names - var capName = name.charAt(0).toUpperCase() + name.slice(1), - origName = name, - i = cssPrefixes.length; - - while ( i-- ) { - name = cssPrefixes[ i ] + capName; - if ( name in style ) { - return name; - } - } - - return origName; -} - -function isHidden( elem, el ) { - elem = el || elem; - return jQuery.css( elem, "display" ) === "none" || !jQuery.contains( elem.ownerDocument, elem ); -} - -function showHide( elements, show ) { - var elem, display, - values = [], - index = 0, - length = 
elements.length; - - for ( ; index < length; index++ ) { - elem = elements[ index ]; - if ( !elem.style ) { - continue; - } - values[ index ] = jQuery._data( elem, "olddisplay" ); - if ( show ) { - // Reset the inline display of this element to learn if it is - // being hidden by cascaded rules or not - if ( !values[ index ] && elem.style.display === "none" ) { - elem.style.display = ""; - } - - // Set elements which have been overridden with display: none - // in a stylesheet to whatever the default browser style is - // for such an element - if ( elem.style.display === "" && isHidden( elem ) ) { - values[ index ] = jQuery._data( elem, "olddisplay", css_defaultDisplay(elem.nodeName) ); - } - } else { - display = curCSS( elem, "display" ); - - if ( !values[ index ] && display !== "none" ) { - jQuery._data( elem, "olddisplay", display ); - } - } - } - - // Set the display of most of the elements in a second loop - // to avoid the constant reflow - for ( index = 0; index < length; index++ ) { - elem = elements[ index ]; - if ( !elem.style ) { - continue; - } - if ( !show || elem.style.display === "none" || elem.style.display === "" ) { - elem.style.display = show ? values[ index ] || "" : "none"; - } - } - - return elements; -} - -jQuery.fn.extend({ - css: function( name, value ) { - return jQuery.access( this, function( elem, name, value ) { - return value !== undefined ? - jQuery.style( elem, name, value ) : - jQuery.css( elem, name ); - }, name, value, arguments.length > 1 ); - }, - show: function() { - return showHide( this, true ); - }, - hide: function() { - return showHide( this ); - }, - toggle: function( state, fn2 ) { - var bool = typeof state === "boolean"; - - if ( jQuery.isFunction( state ) && jQuery.isFunction( fn2 ) ) { - return eventsToggle.apply( this, arguments ); - } - - return this.each(function() { - if ( bool ? state : isHidden( this ) ) { - jQuery( this ).show(); - } else { - jQuery( this ).hide(); - } - }); - } -}); - -jQuery.extend({ - // Add in style property hooks for overriding the default - // behavior of getting and setting a style property - cssHooks: { - opacity: { - get: function( elem, computed ) { - if ( computed ) { - // We should always get a number back from opacity - var ret = curCSS( elem, "opacity" ); - return ret === "" ? "1" : ret; - - } - } - } - }, - - // Exclude the following css properties to add px - cssNumber: { - "fillOpacity": true, - "fontWeight": true, - "lineHeight": true, - "opacity": true, - "orphans": true, - "widows": true, - "zIndex": true, - "zoom": true - }, - - // Add in properties whose names you wish to fix before - // setting or getting the value - cssProps: { - // normalize float css property - "float": jQuery.support.cssFloat ? 
"cssFloat" : "styleFloat" - }, - - // Get and set the style property on a DOM Node - style: function( elem, name, value, extra ) { - // Don't set styles on text and comment nodes - if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { - return; - } - - // Make sure that we're working with the right name - var ret, type, hooks, - origName = jQuery.camelCase( name ), - style = elem.style; - - name = jQuery.cssProps[ origName ] || ( jQuery.cssProps[ origName ] = vendorPropName( style, origName ) ); - - // gets hook for the prefixed version - // followed by the unprefixed version - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // Check if we're setting a value - if ( value !== undefined ) { - type = typeof value; - - // convert relative number strings (+= or -=) to relative numbers. #7345 - if ( type === "string" && (ret = rrelNum.exec( value )) ) { - value = ( ret[1] + 1 ) * ret[2] + parseFloat( jQuery.css( elem, name ) ); - // Fixes bug #9237 - type = "number"; - } - - // Make sure that NaN and null values aren't set. See: #7116 - if ( value == null || type === "number" && isNaN( value ) ) { - return; - } - - // If a number was passed in, add 'px' to the (except for certain CSS properties) - if ( type === "number" && !jQuery.cssNumber[ origName ] ) { - value += "px"; - } - - // If a hook was provided, use that value, otherwise just set the specified value - if ( !hooks || !("set" in hooks) || (value = hooks.set( elem, value, extra )) !== undefined ) { - // Wrapped to prevent IE from throwing errors when 'invalid' values are provided - // Fixes bug #5509 - try { - style[ name ] = value; - } catch(e) {} - } - - } else { - // If a hook was provided get the non-computed value from there - if ( hooks && "get" in hooks && (ret = hooks.get( elem, false, extra )) !== undefined ) { - return ret; - } - - // Otherwise just get the value from the style object - return style[ name ]; - } - }, - - css: function( elem, name, numeric, extra ) { - var val, num, hooks, - origName = jQuery.camelCase( name ); - - // Make sure that we're working with the right name - name = jQuery.cssProps[ origName ] || ( jQuery.cssProps[ origName ] = vendorPropName( elem.style, origName ) ); - - // gets hook for the prefixed version - // followed by the unprefixed version - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // If a hook was provided get the computed value from there - if ( hooks && "get" in hooks ) { - val = hooks.get( elem, true, extra ); - } - - // Otherwise, if a way to get the computed value exists, use that - if ( val === undefined ) { - val = curCSS( elem, name ); - } - - //convert "normal" to computed value - if ( val === "normal" && name in cssNormalTransform ) { - val = cssNormalTransform[ name ]; - } - - // Return, converting to number if forced or a qualifier was provided and val looks numeric - if ( numeric || extra !== undefined ) { - num = parseFloat( val ); - return numeric || jQuery.isNumeric( num ) ? 
num || 0 : val; - } - return val; - }, - - // A method for quickly swapping in/out CSS properties to get correct calculations - swap: function( elem, options, callback ) { - var ret, name, - old = {}; - - // Remember the old values, and insert the new ones - for ( name in options ) { - old[ name ] = elem.style[ name ]; - elem.style[ name ] = options[ name ]; - } - - ret = callback.call( elem ); - - // Revert the old values - for ( name in options ) { - elem.style[ name ] = old[ name ]; - } - - return ret; - } -}); - -// NOTE: To any future maintainer, we've window.getComputedStyle -// because jsdom on node.js will break without it. -if ( window.getComputedStyle ) { - curCSS = function( elem, name ) { - var ret, width, minWidth, maxWidth, - computed = window.getComputedStyle( elem, null ), - style = elem.style; - - if ( computed ) { - - // getPropertyValue is only needed for .css('filter') in IE9, see #12537 - ret = computed.getPropertyValue( name ) || computed[ name ]; - - if ( ret === "" && !jQuery.contains( elem.ownerDocument, elem ) ) { - ret = jQuery.style( elem, name ); - } - - // A tribute to the "awesome hack by Dean Edwards" - // Chrome < 17 and Safari 5.0 uses "computed value" instead of "used value" for margin-right - // Safari 5.1.7 (at least) returns percentage for a larger set of values, but width seems to be reliably pixels - // this is against the CSSOM draft spec: http://dev.w3.org/csswg/cssom/#resolved-values - if ( rnumnonpx.test( ret ) && rmargin.test( name ) ) { - width = style.width; - minWidth = style.minWidth; - maxWidth = style.maxWidth; - - style.minWidth = style.maxWidth = style.width = ret; - ret = computed.width; - - style.width = width; - style.minWidth = minWidth; - style.maxWidth = maxWidth; - } - } - - return ret; - }; -} else if ( document.documentElement.currentStyle ) { - curCSS = function( elem, name ) { - var left, rsLeft, - ret = elem.currentStyle && elem.currentStyle[ name ], - style = elem.style; - - // Avoid setting ret to empty string here - // so we don't default to auto - if ( ret == null && style && style[ name ] ) { - ret = style[ name ]; - } - - // From the awesome hack by Dean Edwards - // http://erik.eae.net/archives/2007/07/27/18.54.15/#comment-102291 - - // If we're not dealing with a regular pixel number - // but a number that has a weird ending, we need to convert it to pixels - // but not position css attributes, as those are proportional to the parent element instead - // and we can't measure the parent instead because it might trigger a "stacking dolls" problem - if ( rnumnonpx.test( ret ) && !rposition.test( name ) ) { - - // Remember the original values - left = style.left; - rsLeft = elem.runtimeStyle && elem.runtimeStyle.left; - - // Put in the new values to get a computed value out - if ( rsLeft ) { - elem.runtimeStyle.left = elem.currentStyle.left; - } - style.left = name === "fontSize" ? "1em" : ret; - ret = style.pixelLeft + "px"; - - // Revert the changed values - style.left = left; - if ( rsLeft ) { - elem.runtimeStyle.left = rsLeft; - } - } - - return ret === "" ? "auto" : ret; - }; -} - -function setPositiveNumber( elem, value, subtract ) { - var matches = rnumsplit.exec( value ); - return matches ? - Math.max( 0, matches[ 1 ] - ( subtract || 0 ) ) + ( matches[ 2 ] || "px" ) : - value; -} - -function augmentWidthOrHeight( elem, name, extra, isBorderBox ) { - var i = extra === ( isBorderBox ? "border" : "content" ) ? 
- // If we already have the right measurement, avoid augmentation - 4 : - // Otherwise initialize for horizontal or vertical properties - name === "width" ? 1 : 0, - - val = 0; - - for ( ; i < 4; i += 2 ) { - // both box models exclude margin, so add it if we want it - if ( extra === "margin" ) { - // we use jQuery.css instead of curCSS here - // because of the reliableMarginRight CSS hook! - val += jQuery.css( elem, extra + cssExpand[ i ], true ); - } - - // From this point on we use curCSS for maximum performance (relevant in animations) - if ( isBorderBox ) { - // border-box includes padding, so remove it if we want content - if ( extra === "content" ) { - val -= parseFloat( curCSS( elem, "padding" + cssExpand[ i ] ) ) || 0; - } - - // at this point, extra isn't border nor margin, so remove border - if ( extra !== "margin" ) { - val -= parseFloat( curCSS( elem, "border" + cssExpand[ i ] + "Width" ) ) || 0; - } - } else { - // at this point, extra isn't content, so add padding - val += parseFloat( curCSS( elem, "padding" + cssExpand[ i ] ) ) || 0; - - // at this point, extra isn't content nor padding, so add border - if ( extra !== "padding" ) { - val += parseFloat( curCSS( elem, "border" + cssExpand[ i ] + "Width" ) ) || 0; - } - } - } - - return val; -} - -function getWidthOrHeight( elem, name, extra ) { - - // Start with offset property, which is equivalent to the border-box value - var val = name === "width" ? elem.offsetWidth : elem.offsetHeight, - valueIsBorderBox = true, - isBorderBox = jQuery.support.boxSizing && jQuery.css( elem, "boxSizing" ) === "border-box"; - - // some non-html elements return undefined for offsetWidth, so check for null/undefined - // svg - https://bugzilla.mozilla.org/show_bug.cgi?id=649285 - // MathML - https://bugzilla.mozilla.org/show_bug.cgi?id=491668 - if ( val <= 0 || val == null ) { - // Fall back to computed then uncomputed css if necessary - val = curCSS( elem, name ); - if ( val < 0 || val == null ) { - val = elem.style[ name ]; - } - - // Computed unit is not pixels. Stop here and return. - if ( rnumnonpx.test(val) ) { - return val; - } - - // we need the check for style in case a browser which returns unreliable values - // for getComputedStyle silently falls back to the reliable elem.style - valueIsBorderBox = isBorderBox && ( jQuery.support.boxSizingReliable || val === elem.style[ name ] ); - - // Normalize "", auto, and prepare for extra - val = parseFloat( val ) || 0; - } - - // use the active box-sizing model to add/subtract irrelevant styles - return ( val + - augmentWidthOrHeight( - elem, - name, - extra || ( isBorderBox ? "border" : "content" ), - valueIsBorderBox - ) - ) + "px"; -} - - -// Try to determine the default display value of an element -function css_defaultDisplay( nodeName ) { - if ( elemdisplay[ nodeName ] ) { - return elemdisplay[ nodeName ]; - } - - var elem = jQuery( "<" + nodeName + ">" ).appendTo( document.body ), - display = elem.css("display"); - elem.remove(); - - // If the simple way fails, - // get element's real default display by attaching it to a temp iframe - if ( display === "none" || display === "" ) { - // Use the already-created iframe if possible - iframe = document.body.appendChild( - iframe || jQuery.extend( document.createElement("iframe"), { - frameBorder: 0, - width: 0, - height: 0 - }) - ); - - // Create a cacheable copy of the iframe document on first call. 
- // IE and Opera will allow us to reuse the iframeDoc without re-writing the fake HTML
-    // document to it; WebKit & Firefox won't allow reusing the iframe document.
-    if ( !iframeDoc || !iframe.createElement ) {
-      iframeDoc = ( iframe.contentWindow || iframe.contentDocument ).document;
-      iframeDoc.write("<!doctype html><html><body>");
-      iframeDoc.close();
-    }
-
-    elem = iframeDoc.body.appendChild( iframeDoc.createElement(nodeName) );
-
-    display = curCSS( elem, "display" );
-    document.body.removeChild( iframe );
-  }
-
-  // Store the correct default display
-  elemdisplay[ nodeName ] = display;
-
-  return display;
-}
-
-jQuery.each([ "height", "width" ], function( i, name ) {
-  jQuery.cssHooks[ name ] = {
-    get: function( elem, computed, extra ) {
-      if ( computed ) {
-        // certain elements can have dimension info if we invisibly show them
-        // however, it must have a current display style that would benefit from this
-        if ( elem.offsetWidth === 0 && rdisplayswap.test( curCSS( elem, "display" ) ) ) {
-          return jQuery.swap( elem, cssShow, function() {
-            return getWidthOrHeight( elem, name, extra );
-          });
-        } else {
-          return getWidthOrHeight( elem, name, extra );
-        }
-      }
-    },
-
-    set: function( elem, value, extra ) {
-      return setPositiveNumber( elem, value, extra ?
-        augmentWidthOrHeight(
-          elem,
-          name,
-          extra,
-          jQuery.support.boxSizing && jQuery.css( elem, "boxSizing" ) === "border-box"
-        ) : 0
-      );
-    }
-  };
-});
-
-if ( !jQuery.support.opacity ) {
-  jQuery.cssHooks.opacity = {
-    get: function( elem, computed ) {
-      // IE uses filters for opacity
-      return ropacity.test( (computed && elem.currentStyle ? elem.currentStyle.filter : elem.style.filter) || "" ) ?
-        ( 0.01 * parseFloat( RegExp.$1 ) ) + "" :
-        computed ? "1" : "";
-    },
-
-    set: function( elem, value ) {
-      var style = elem.style,
-        currentStyle = elem.currentStyle,
-        opacity = jQuery.isNumeric( value ) ? "alpha(opacity=" + value * 100 + ")" : "",
-        filter = currentStyle && currentStyle.filter || style.filter || "";
-
-      // IE has trouble with opacity if it does not have layout
-      // Force it by setting the zoom level
-      style.zoom = 1;
-
-      // if setting opacity to 1, and no other filters exist - attempt to remove filter attribute #6652
-      if ( value >= 1 && jQuery.trim( filter.replace( ralpha, "" ) ) === "" &&
-        style.removeAttribute ) {
-
-        // Setting style.filter to null, "" & " " still leave "filter:" in the cssText
-        // if "filter:" is present at all, clearType is disabled, we want to avoid this
-        // style.removeAttribute is IE Only, but so apparently is this code path...
-        style.removeAttribute( "filter" );
-
-        // if there there is no filter style applied in a css rule, we are done
-        if ( currentStyle && !currentStyle.filter ) {
-          return;
-        }
-      }
-
-      // otherwise, set new filter values
-      style.filter = ralpha.test( filter ) ?
- filter.replace( ralpha, opacity ) : - filter + " " + opacity; - } - }; -} - -// These hooks cannot be added until DOM ready because the support test -// for it is not run until after DOM ready -jQuery(function() { - if ( !jQuery.support.reliableMarginRight ) { - jQuery.cssHooks.marginRight = { - get: function( elem, computed ) { - // WebKit Bug 13343 - getComputedStyle returns wrong value for margin-right - // Work around by temporarily setting element display to inline-block - return jQuery.swap( elem, { "display": "inline-block" }, function() { - if ( computed ) { - return curCSS( elem, "marginRight" ); - } - }); - } - }; - } - - // Webkit bug: https://bugs.webkit.org/show_bug.cgi?id=29084 - // getComputedStyle returns percent when specified for top/left/bottom/right - // rather than make the css module depend on the offset module, we just check for it here - if ( !jQuery.support.pixelPosition && jQuery.fn.position ) { - jQuery.each( [ "top", "left" ], function( i, prop ) { - jQuery.cssHooks[ prop ] = { - get: function( elem, computed ) { - if ( computed ) { - var ret = curCSS( elem, prop ); - // if curCSS returns percentage, fallback to offset - return rnumnonpx.test( ret ) ? jQuery( elem ).position()[ prop ] + "px" : ret; - } - } - }; - }); - } - -}); - -if ( jQuery.expr && jQuery.expr.filters ) { - jQuery.expr.filters.hidden = function( elem ) { - return ( elem.offsetWidth === 0 && elem.offsetHeight === 0 ) || (!jQuery.support.reliableHiddenOffsets && ((elem.style && elem.style.display) || curCSS( elem, "display" )) === "none"); - }; - - jQuery.expr.filters.visible = function( elem ) { - return !jQuery.expr.filters.hidden( elem ); - }; -} - -// These hooks are used by animate to expand properties -jQuery.each({ - margin: "", - padding: "", - border: "Width" -}, function( prefix, suffix ) { - jQuery.cssHooks[ prefix + suffix ] = { - expand: function( value ) { - var i, - - // assumes a single number if not a string - parts = typeof value === "string" ? value.split(" ") : [ value ], - expanded = {}; - - for ( i = 0; i < 4; i++ ) { - expanded[ prefix + cssExpand[ i ] + suffix ] = - parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; - } - - return expanded; - } - }; - - if ( !rmargin.test( prefix ) ) { - jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; - } -}); -var r20 = /%20/g, - rbracket = /\[\]$/, - rCRLF = /\r?\n/g, - rinput = /^(?:color|date|datetime|datetime-local|email|hidden|month|number|password|range|search|tel|text|time|url|week)$/i, - rselectTextarea = /^(?:select|textarea)/i; - -jQuery.fn.extend({ - serialize: function() { - return jQuery.param( this.serializeArray() ); - }, - serializeArray: function() { - return this.map(function(){ - return this.elements ? jQuery.makeArray( this.elements ) : this; - }) - .filter(function(){ - return this.name && !this.disabled && - ( this.checked || rselectTextarea.test( this.nodeName ) || - rinput.test( this.type ) ); - }) - .map(function( i, elem ){ - var val = jQuery( this ).val(); - - return val == null ? - null : - jQuery.isArray( val ) ? - jQuery.map( val, function( val, i ){ - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - }) : - { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - }).get(); - } -}); - -//Serialize an array of form elements or a set of -//key/values into a query string -jQuery.param = function( a, traditional ) { - var prefix, - s = [], - add = function( key, value ) { - // If value is a function, invoke it and return its value - value = jQuery.isFunction( value ) ? 
value() : ( value == null ? "" : value ); - s[ s.length ] = encodeURIComponent( key ) + "=" + encodeURIComponent( value ); - }; - - // Set traditional to true for jQuery <= 1.3.2 behavior. - if ( traditional === undefined ) { - traditional = jQuery.ajaxSettings && jQuery.ajaxSettings.traditional; - } - - // If an array was passed in, assume that it is an array of form elements. - if ( jQuery.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { - // Serialize the form elements - jQuery.each( a, function() { - add( this.name, this.value ); - }); - - } else { - // If traditional, encode the "old" way (the way 1.3.2 or older - // did it), otherwise encode params recursively. - for ( prefix in a ) { - buildParams( prefix, a[ prefix ], traditional, add ); - } - } - - // Return the resulting serialization - return s.join( "&" ).replace( r20, "+" ); -}; - -function buildParams( prefix, obj, traditional, add ) { - var name; - - if ( jQuery.isArray( obj ) ) { - // Serialize array item. - jQuery.each( obj, function( i, v ) { - if ( traditional || rbracket.test( prefix ) ) { - // Treat each array item as a scalar. - add( prefix, v ); - - } else { - // If array item is non-scalar (array or object), encode its - // numeric index to resolve deserialization ambiguity issues. - // Note that rack (as of 1.0.0) can't currently deserialize - // nested arrays properly, and attempting to do so may cause - // a server error. Possible fixes are to modify rack's - // deserialization algorithm or to provide an option or flag - // to force array serialization to be shallow. - buildParams( prefix + "[" + ( typeof v === "object" ? i : "" ) + "]", v, traditional, add ); - } - }); - - } else if ( !traditional && jQuery.type( obj ) === "object" ) { - // Serialize object item. - for ( name in obj ) { - buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); - } - - } else { - // Serialize scalar item. 
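// Editorial note, not part of the deleted file: concrete output of the
// three buildParams branches above, shown URL-decoded for readability
// (jQuery.param runs keys and values through encodeURIComponent):
//   jQuery.param({ a: [ 1, 2 ] })        -> "a[]=1&a[]=2"   scalar array items
//   jQuery.param({ a: [ { b: 1 } ] })    -> "a[0][b]=1"     non-scalar item keeps its numeric index
//   jQuery.param({ a: [ 1, 2 ] }, true)  -> "a=1&a=2"       traditional, shallow encoding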
- add( prefix, obj );
-  }
-}
-var
-  // Document location
-  ajaxLocParts,
-  ajaxLocation,
-
-  rhash = /#.*$/,
-  rheaders = /^(.*?):[ \t]*([^\r\n]*)\r?$/mg, // IE leaves an \r character at EOL
-  // #7653, #8125, #8152: local protocol detection
-  rlocalProtocol = /^(?:about|app|app\-storage|.+\-extension|file|res|widget):$/,
-  rnoContent = /^(?:GET|HEAD)$/,
-  rprotocol = /^\/\//,
-  rquery = /\?/,
-  rscript = /<script\b[^<]*(?:(?!<\/script>)<[^<]*)*<\/script>/gi,
-  rts = /([?&])_=[^&]*/,
-  rurl = /^([\w\+\.\-]+:)(?:\/\/([^\/?#:]*)(?::(\d+)|)|)/,
-
-  // Keep a copy of the old load method
-  _load = jQuery.fn.load,
-
-  /* Prefilters
-   * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example)
-   * 2) These are called:
-   *    - BEFORE asking for a transport
-   *    - AFTER param serialization (s.data is a string if s.processData is true)
-   * 3) key is the dataType
-   * 4) the catchall symbol "*" can be used
-   * 5) execution will start with transport dataType and THEN continue down to "*" if needed
-   */
-  prefilters = {},
-
-  /* Transports bindings
-   * 1) key is the dataType
-   * 2) the catchall symbol "*" can be used
-   * 3) selection will start with transport dataType and THEN go to "*" if needed
-   */
-  transports = {},
-
-  // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression
-  allTypes = ["*/"] + ["*"];
-
-// #8138, IE may throw an exception when accessing
-// a field from window.location if document.domain has been set
-try {
-  ajaxLocation = location.href;
-} catch( e ) {
-  // Use the href attribute of an A element
-  // since IE will modify it given document.location
-  ajaxLocation = document.createElement( "a" );
-  ajaxLocation.href = "";
-  ajaxLocation = ajaxLocation.href;
-}
-
-// Segment location into parts
-ajaxLocParts = rurl.exec( ajaxLocation.toLowerCase() ) || [];
-
-// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport
-function addToPrefiltersOrTransports( structure ) {
-
-  // dataTypeExpression is optional and defaults to "*"
-  return function( dataTypeExpression, func ) {
-
-    if ( typeof dataTypeExpression !== "string" ) {
-      func = dataTypeExpression;
-      dataTypeExpression = "*";
-    }
-
-    var dataType, list, placeBefore,
-      dataTypes = dataTypeExpression.toLowerCase().split( core_rspace ),
-      i = 0,
-      length = dataTypes.length;
-
-    if ( jQuery.isFunction( func ) ) {
-      // For each dataType in the dataTypeExpression
-      for ( ; i < length; i++ ) {
-        dataType = dataTypes[ i ];
-        // We control if we're asked to add before
-        // any existing element
-        placeBefore = /^\+/.test( dataType );
-        if ( placeBefore ) {
-          dataType = dataType.substr( 1 ) || "*";
-        }
-        list = structure[ dataType ] = structure[ dataType ] || [];
-        // then we add to the structure accordingly
-        list[ placeBefore ? "unshift" : "push" ]( func );
-      }
-    }
-  };
-}
-
-// Base inspection function for prefilters and transports
-function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR,
-    dataType /* internal */, inspected /* internal */ ) {
-
-  dataType = dataType || options.dataTypes[ 0 ];
-  inspected = inspected || {};
-
-  inspected[ dataType ] = true;
-
-  var selection,
-    list = structure[ dataType ],
-    i = 0,
-    length = list ? list.length : 0,
-    executeOnly = ( structure === prefilters );
-
-  for ( ; i < length && ( executeOnly || !selection ); i++ ) {
-    selection = list[ i ]( options, originalOptions, jqXHR );
-    // If we got redirected to another dataType
-    // we try there if executing only and not done already
-    if ( typeof selection === "string" ) {
-      if ( !executeOnly || inspected[ selection ] ) {
-        selection = undefined;
-      } else {
-        options.dataTypes.unshift( selection );
-        selection = inspectPrefiltersOrTransports(
-          structure, options, originalOptions, jqXHR, selection, inspected );
-      }
-    }
-  }
-  // If we're only executing or nothing was selected
-  // we try the catchall dataType if not done already
-  if ( ( executeOnly || !selection ) && !inspected[ "*" ] ) {
-    selection = inspectPrefiltersOrTransports(
-      structure, options, originalOptions, jqXHR, "*", inspected );
-  }
-  // unnecessary when only executing (prefilters)
-  // but it'll be ignored by the caller in that case
-  return selection;
-}
-
-// A special extend for ajax options
-// that takes "flat" options (not to be deep extended)
-// Fixes #9887
-function ajaxExtend( target, src ) {
-  var key, deep,
-    flatOptions = jQuery.ajaxSettings.flatOptions || {};
-  for ( key in src ) {
-    if ( src[ key ] !== undefined ) {
-      ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ];
-    }
-  }
-  if ( deep ) {
-    jQuery.extend( true, target, deep );
-  }
-}
-
-jQuery.fn.load = function( url, params, callback ) {
-  if ( typeof url !== "string" && _load ) {
-    return _load.apply( this, arguments );
-  }
-
-  // Don't do a request if no elements are being requested
-  if ( !this.length ) {
-    return this;
-  }
-
-  var selector, type, response,
-    self = this,
-    off = url.indexOf(" ");
-
-  if ( off >= 0 ) {
-    selector = url.slice( off, url.length );
-    url = url.slice( 0, off );
-  }
-
-  // If it's a function
-  if ( jQuery.isFunction( params ) ) {
-
-    // We assume that it's the callback
-    callback = params;
-    params = undefined;
-
-  // Otherwise, build a param string
-  } else if ( params && typeof params === "object" ) {
-    type = "POST";
-  }
-
-  // Request the remote document
-  jQuery.ajax({
-    url: url,
-
-    // if "type" variable is undefined, then "GET" method will be used
-    type: type,
-    dataType: "html",
-    data: params,
-    complete: function( jqXHR, status ) {
-      if ( callback ) {
-        self.each( callback, response || [ jqXHR.responseText, status, jqXHR ] );
-      }
-    }
-  }).done(function( responseText ) {
-
-    // Save response for use in complete callback
-    response = arguments;
-
-    // See if a selector was specified
-    self.html( selector ?
-
-      // Create a dummy div to hold the results
-      jQuery("<div>
") - - // inject the contents of the document in, removing the scripts - // to avoid any 'Permission Denied' errors in IE - .append( responseText.replace( rscript, "" ) ) - - // Locate the specified elements - .find( selector ) : - - // If not, just inject the full result - responseText ); - - }); - - return this; -}; - -// Attach a bunch of functions for handling common AJAX events -jQuery.each( "ajaxStart ajaxStop ajaxComplete ajaxError ajaxSuccess ajaxSend".split( " " ), function( i, o ){ - jQuery.fn[ o ] = function( f ){ - return this.on( o, f ); - }; -}); - -jQuery.each( [ "get", "post" ], function( i, method ) { - jQuery[ method ] = function( url, data, callback, type ) { - // shift arguments if data argument was omitted - if ( jQuery.isFunction( data ) ) { - type = type || callback; - callback = data; - data = undefined; - } - - return jQuery.ajax({ - type: method, - url: url, - data: data, - success: callback, - dataType: type - }); - }; -}); - -jQuery.extend({ - - getScript: function( url, callback ) { - return jQuery.get( url, undefined, callback, "script" ); - }, - - getJSON: function( url, data, callback ) { - return jQuery.get( url, data, callback, "json" ); - }, - - // Creates a full fledged settings object into target - // with both ajaxSettings and settings fields. - // If target is omitted, writes into ajaxSettings. - ajaxSetup: function( target, settings ) { - if ( settings ) { - // Building a settings object - ajaxExtend( target, jQuery.ajaxSettings ); - } else { - // Extending ajaxSettings - settings = target; - target = jQuery.ajaxSettings; - } - ajaxExtend( target, settings ); - return target; - }, - - ajaxSettings: { - url: ajaxLocation, - isLocal: rlocalProtocol.test( ajaxLocParts[ 1 ] ), - global: true, - type: "GET", - contentType: "application/x-www-form-urlencoded; charset=UTF-8", - processData: true, - async: true, - /* - timeout: 0, - data: null, - dataType: null, - username: null, - password: null, - cache: null, - throws: false, - traditional: false, - headers: {}, - */ - - accepts: { - xml: "application/xml, text/xml", - html: "text/html", - text: "text/plain", - json: "application/json, text/javascript", - "*": allTypes - }, - - contents: { - xml: /xml/, - html: /html/, - json: /json/ - }, - - responseFields: { - xml: "responseXML", - text: "responseText" - }, - - // List of data converters - // 1) key format is "source_type destination_type" (a single space in-between) - // 2) the catchall symbol "*" can be used for source_type - converters: { - - // Convert anything to text - "* text": window.String, - - // Text to html (true = no transformation) - "text html": true, - - // Evaluate text as a json expression - "text json": jQuery.parseJSON, - - // Parse text as xml - "text xml": jQuery.parseXML - }, - - // For options that shouldn't be deep extended: - // you can add your own custom options here if - // and when you create one that shouldn't be - // deep extended (see ajaxExtend) - flatOptions: { - context: true, - url: true - } - }, - - ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), - ajaxTransport: addToPrefiltersOrTransports( transports ), - - // Main method - ajax: function( url, options ) { - - // If url is an object, simulate pre-1.5 signature - if ( typeof url === "object" ) { - options = url; - url = undefined; - } - - // Force options to be an object - options = options || {}; - - var // ifModified key - ifModifiedKey, - // Response headers - responseHeadersString, - responseHeaders, - // transport - transport, - // timeout handle - 
timeoutTimer, - // Cross-domain detection vars - parts, - // To know if global events are to be dispatched - fireGlobals, - // Loop variable - i, - // Create the final options object - s = jQuery.ajaxSetup( {}, options ), - // Callbacks context - callbackContext = s.context || s, - // Context for global events - // It's the callbackContext if one was provided in the options - // and if it's a DOM node or a jQuery collection - globalEventContext = callbackContext !== s && - ( callbackContext.nodeType || callbackContext instanceof jQuery ) ? - jQuery( callbackContext ) : jQuery.event, - // Deferreds - deferred = jQuery.Deferred(), - completeDeferred = jQuery.Callbacks( "once memory" ), - // Status-dependent callbacks - statusCode = s.statusCode || {}, - // Headers (they are sent all at once) - requestHeaders = {}, - requestHeadersNames = {}, - // The jqXHR state - state = 0, - // Default abort message - strAbort = "canceled", - // Fake xhr - jqXHR = { - - readyState: 0, - - // Caches the header - setRequestHeader: function( name, value ) { - if ( !state ) { - var lname = name.toLowerCase(); - name = requestHeadersNames[ lname ] = requestHeadersNames[ lname ] || name; - requestHeaders[ name ] = value; - } - return this; - }, - - // Raw string - getAllResponseHeaders: function() { - return state === 2 ? responseHeadersString : null; - }, - - // Builds headers hashtable if needed - getResponseHeader: function( key ) { - var match; - if ( state === 2 ) { - if ( !responseHeaders ) { - responseHeaders = {}; - while( ( match = rheaders.exec( responseHeadersString ) ) ) { - responseHeaders[ match[1].toLowerCase() ] = match[ 2 ]; - } - } - match = responseHeaders[ key.toLowerCase() ]; - } - return match === undefined ? null : match; - }, - - // Overrides response content-type header - overrideMimeType: function( type ) { - if ( !state ) { - s.mimeType = type; - } - return this; - }, - - // Cancel the request - abort: function( statusText ) { - statusText = statusText || strAbort; - if ( transport ) { - transport.abort( statusText ); - } - done( 0, statusText ); - return this; - } - }; - - // Callback for when everything is done - // It is defined here because jslint complains if it is declared - // at the end of the function (which would be more logical and readable) - function done( status, nativeStatusText, responses, headers ) { - var isSuccess, success, error, response, modified, - statusText = nativeStatusText; - - // Called once - if ( state === 2 ) { - return; - } - - // State is "done" now - state = 2; - - // Clear timeout if it exists - if ( timeoutTimer ) { - clearTimeout( timeoutTimer ); - } - - // Dereference transport for early garbage collection - // (no matter how long the jqXHR object will be used) - transport = undefined; - - // Cache response headers - responseHeadersString = headers || ""; - - // Set readyState - jqXHR.readyState = status > 0 ? 4 : 0; - - // Get response data - if ( responses ) { - response = ajaxHandleResponses( s, jqXHR, responses ); - } - - // If successful, handle type chaining - if ( status >= 200 && status < 300 || status === 304 ) { - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
- if ( s.ifModified ) { - - modified = jqXHR.getResponseHeader("Last-Modified"); - if ( modified ) { - jQuery.lastModified[ ifModifiedKey ] = modified; - } - modified = jqXHR.getResponseHeader("Etag"); - if ( modified ) { - jQuery.etag[ ifModifiedKey ] = modified; - } - } - - // If not modified - if ( status === 304 ) { - - statusText = "notmodified"; - isSuccess = true; - - // If we have data - } else { - - isSuccess = ajaxConvert( s, response ); - statusText = isSuccess.state; - success = isSuccess.data; - error = isSuccess.error; - isSuccess = !error; - } - } else { - // We extract error from statusText - // then normalize statusText and status for non-aborts - error = statusText; - if ( !statusText || status ) { - statusText = "error"; - if ( status < 0 ) { - status = 0; - } - } - } - - // Set data for the fake xhr object - jqXHR.status = status; - jqXHR.statusText = ( nativeStatusText || statusText ) + ""; - - // Success/Error - if ( isSuccess ) { - deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); - } else { - deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); - } - - // Status-dependent callbacks - jqXHR.statusCode( statusCode ); - statusCode = undefined; - - if ( fireGlobals ) { - globalEventContext.trigger( "ajax" + ( isSuccess ? "Success" : "Error" ), - [ jqXHR, s, isSuccess ? success : error ] ); - } - - // Complete - completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); - - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); - // Handle the global AJAX counter - if ( !( --jQuery.active ) ) { - jQuery.event.trigger( "ajaxStop" ); - } - } - } - - // Attach deferreds - deferred.promise( jqXHR ); - jqXHR.success = jqXHR.done; - jqXHR.error = jqXHR.fail; - jqXHR.complete = completeDeferred.add; - - // Status-dependent callbacks - jqXHR.statusCode = function( map ) { - if ( map ) { - var tmp; - if ( state < 2 ) { - for ( tmp in map ) { - statusCode[ tmp ] = [ statusCode[tmp], map[tmp] ]; - } - } else { - tmp = map[ jqXHR.status ]; - jqXHR.always( tmp ); - } - } - return this; - }; - - // Remove hash character (#7531: and string promotion) - // Add protocol if not provided (#5866: IE7 issue with protocol-less urls) - // We also use the url parameter if available - s.url = ( ( url || s.url ) + "" ).replace( rhash, "" ).replace( rprotocol, ajaxLocParts[ 1 ] + "//" ); - - // Extract dataTypes list - s.dataTypes = jQuery.trim( s.dataType || "*" ).toLowerCase().split( core_rspace ); - - // A cross-domain request is in order when we have a protocol:host:port mismatch - if ( s.crossDomain == null ) { - parts = rurl.exec( s.url.toLowerCase() ); - s.crossDomain = !!( parts && - ( parts[ 1 ] !== ajaxLocParts[ 1 ] || parts[ 2 ] !== ajaxLocParts[ 2 ] || - ( parts[ 3 ] || ( parts[ 1 ] === "http:" ? 80 : 443 ) ) != - ( ajaxLocParts[ 3 ] || ( ajaxLocParts[ 1 ] === "http:" ? 
80 : 443 ) ) ) - ); - } - - // Convert data if not already a string - if ( s.data && s.processData && typeof s.data !== "string" ) { - s.data = jQuery.param( s.data, s.traditional ); - } - - // Apply prefilters - inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); - - // If request was aborted inside a prefilter, stop there - if ( state === 2 ) { - return jqXHR; - } - - // We can fire global events as of now if asked to - fireGlobals = s.global; - - // Uppercase the type - s.type = s.type.toUpperCase(); - - // Determine if request has content - s.hasContent = !rnoContent.test( s.type ); - - // Watch for a new set of requests - if ( fireGlobals && jQuery.active++ === 0 ) { - jQuery.event.trigger( "ajaxStart" ); - } - - // More options handling for requests with no content - if ( !s.hasContent ) { - - // If data is available, append data to url - if ( s.data ) { - s.url += ( rquery.test( s.url ) ? "&" : "?" ) + s.data; - // #9682: remove data so that it's not used in an eventual retry - delete s.data; - } - - // Get ifModifiedKey before adding the anti-cache parameter - ifModifiedKey = s.url; - - // Add anti-cache in url if needed - if ( s.cache === false ) { - - var ts = jQuery.now(), - // try replacing _= if it is there - ret = s.url.replace( rts, "$1_=" + ts ); - - // if nothing was replaced, add timestamp to the end - s.url = ret + ( ( ret === s.url ) ? ( rquery.test( s.url ) ? "&" : "?" ) + "_=" + ts : "" ); - } - } - - // Set the correct header, if data is being sent - if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { - jqXHR.setRequestHeader( "Content-Type", s.contentType ); - } - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. - if ( s.ifModified ) { - ifModifiedKey = ifModifiedKey || s.url; - if ( jQuery.lastModified[ ifModifiedKey ] ) { - jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ ifModifiedKey ] ); - } - if ( jQuery.etag[ ifModifiedKey ] ) { - jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ ifModifiedKey ] ); - } - } - - // Set the Accepts header for the server, depending on the dataType - jqXHR.setRequestHeader( - "Accept", - s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[0] ] ? - s.accepts[ s.dataTypes[0] ] + ( s.dataTypes[ 0 ] !== "*" ? 
", " + allTypes + "; q=0.01" : "" ) : - s.accepts[ "*" ] - ); - - // Check for headers option - for ( i in s.headers ) { - jqXHR.setRequestHeader( i, s.headers[ i ] ); - } - - // Allow custom headers/mimetypes and early abort - if ( s.beforeSend && ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || state === 2 ) ) { - // Abort if not done already and return - return jqXHR.abort(); - - } - - // aborting is no longer a cancellation - strAbort = "abort"; - - // Install callbacks on deferreds - for ( i in { success: 1, error: 1, complete: 1 } ) { - jqXHR[ i ]( s[ i ] ); - } - - // Get transport - transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); - - // If no transport, we auto-abort - if ( !transport ) { - done( -1, "No Transport" ); - } else { - jqXHR.readyState = 1; - // Send global event - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); - } - // Timeout - if ( s.async && s.timeout > 0 ) { - timeoutTimer = setTimeout( function(){ - jqXHR.abort( "timeout" ); - }, s.timeout ); - } - - try { - state = 1; - transport.send( requestHeaders, done ); - } catch (e) { - // Propagate exception as error if not done - if ( state < 2 ) { - done( -1, e ); - // Simply rethrow otherwise - } else { - throw e; - } - } - } - - return jqXHR; - }, - - // Counter for holding the number of active queries - active: 0, - - // Last-Modified header cache for next request - lastModified: {}, - etag: {} - -}); - -/* Handles responses to an ajax request: - * - sets all responseXXX fields accordingly - * - finds the right dataType (mediates between content-type and expected dataType) - * - returns the corresponding response - */ -function ajaxHandleResponses( s, jqXHR, responses ) { - - var ct, type, finalDataType, firstDataType, - contents = s.contents, - dataTypes = s.dataTypes, - responseFields = s.responseFields; - - // Fill responseXXX fields - for ( type in responseFields ) { - if ( type in responses ) { - jqXHR[ responseFields[type] ] = responses[ type ]; - } - } - - // Remove auto dataType and get content-type in the process - while( dataTypes[ 0 ] === "*" ) { - dataTypes.shift(); - if ( ct === undefined ) { - ct = s.mimeType || jqXHR.getResponseHeader( "content-type" ); - } - } - - // Check if we're dealing with a known content-type - if ( ct ) { - for ( type in contents ) { - if ( contents[ type ] && contents[ type ].test( ct ) ) { - dataTypes.unshift( type ); - break; - } - } - } - - // Check to see if we have a response for the expected dataType - if ( dataTypes[ 0 ] in responses ) { - finalDataType = dataTypes[ 0 ]; - } else { - // Try convertible dataTypes - for ( type in responses ) { - if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[0] ] ) { - finalDataType = type; - break; - } - if ( !firstDataType ) { - firstDataType = type; - } - } - // Or just use first one - finalDataType = finalDataType || firstDataType; - } - - // If we found a dataType - // We add the dataType to the list if needed - // and return the corresponding response - if ( finalDataType ) { - if ( finalDataType !== dataTypes[ 0 ] ) { - dataTypes.unshift( finalDataType ); - } - return responses[ finalDataType ]; - } -} - -// Chain conversions given the request and the original response -function ajaxConvert( s, response ) { - - var conv, conv2, current, tmp, - // Work with a copy of dataTypes in case we need to modify it for conversion - dataTypes = s.dataTypes.slice(), - prev = dataTypes[ 0 ], - converters = {}, - i = 0; - - // Apply the dataFilter if provided - 
if ( s.dataFilter ) { - response = s.dataFilter( response, s.dataType ); - } - - // Create converters map with lowercased keys - if ( dataTypes[ 1 ] ) { - for ( conv in s.converters ) { - converters[ conv.toLowerCase() ] = s.converters[ conv ]; - } - } - - // Convert to each sequential dataType, tolerating list modification - for ( ; (current = dataTypes[++i]); ) { - - // There's only work to do if current dataType is non-auto - if ( current !== "*" ) { - - // Convert response if prev dataType is non-auto and differs from current - if ( prev !== "*" && prev !== current ) { - - // Seek a direct converter - conv = converters[ prev + " " + current ] || converters[ "* " + current ]; - - // If none found, seek a pair - if ( !conv ) { - for ( conv2 in converters ) { - - // If conv2 outputs current - tmp = conv2.split(" "); - if ( tmp[ 1 ] === current ) { - - // If prev can be converted to accepted input - conv = converters[ prev + " " + tmp[ 0 ] ] || - converters[ "* " + tmp[ 0 ] ]; - if ( conv ) { - // Condense equivalence converters - if ( conv === true ) { - conv = converters[ conv2 ]; - - // Otherwise, insert the intermediate dataType - } else if ( converters[ conv2 ] !== true ) { - current = tmp[ 0 ]; - dataTypes.splice( i--, 0, current ); - } - - break; - } - } - } - } - - // Apply converter (if not an equivalence) - if ( conv !== true ) { - - // Unless errors are allowed to bubble, catch and return them - if ( conv && s["throws"] ) { - response = conv( response ); - } else { - try { - response = conv( response ); - } catch ( e ) { - return { state: "parsererror", error: conv ? e : "No conversion from " + prev + " to " + current }; - } - } - } - } - - // Update prev for next iteration - prev = current; - } - } - - return { state: "success", data: response }; -} -var oldCallbacks = [], - rquestion = /\?/, - rjsonp = /(=)\?(?=&|$)|\?\?/, - nonce = jQuery.now(); - -// Default jsonp settings -jQuery.ajaxSetup({ - jsonp: "callback", - jsonpCallback: function() { - var callback = oldCallbacks.pop() || ( jQuery.expando + "_" + ( nonce++ ) ); - this[ callback ] = true; - return callback; - } -}); - -// Detect, normalize options and install callbacks for jsonp requests -jQuery.ajaxPrefilter( "json jsonp", function( s, originalSettings, jqXHR ) { - - var callbackName, overwritten, responseContainer, - data = s.data, - url = s.url, - hasCallback = s.jsonp !== false, - replaceInUrl = hasCallback && rjsonp.test( url ), - replaceInData = hasCallback && !replaceInUrl && typeof data === "string" && - !( s.contentType || "" ).indexOf("application/x-www-form-urlencoded") && - rjsonp.test( data ); - - // Handle iff the expected data type is "jsonp" or we have a parameter to set - if ( s.dataTypes[ 0 ] === "jsonp" || replaceInUrl || replaceInData ) { - - // Get callback name, remembering preexisting value associated with it - callbackName = s.jsonpCallback = jQuery.isFunction( s.jsonpCallback ) ? - s.jsonpCallback() : - s.jsonpCallback; - overwritten = window[ callbackName ]; - - // Insert callback into url or form data - if ( replaceInUrl ) { - s.url = url.replace( rjsonp, "$1" + callbackName ); - } else if ( replaceInData ) { - s.data = data.replace( rjsonp, "$1" + callbackName ); - } else if ( hasCallback ) { - s.url += ( rquestion.test( url ) ? "&" : "?" 
) + s.jsonp + "=" + callbackName; - } - - // Use data converter to retrieve json after script execution - s.converters["script json"] = function() { - if ( !responseContainer ) { - jQuery.error( callbackName + " was not called" ); - } - return responseContainer[ 0 ]; - }; - - // force json dataType - s.dataTypes[ 0 ] = "json"; - - // Install callback - window[ callbackName ] = function() { - responseContainer = arguments; - }; - - // Clean-up function (fires after converters) - jqXHR.always(function() { - // Restore preexisting value - window[ callbackName ] = overwritten; - - // Save back as free - if ( s[ callbackName ] ) { - // make sure that re-using the options doesn't screw things around - s.jsonpCallback = originalSettings.jsonpCallback; - - // save the callback name for future use - oldCallbacks.push( callbackName ); - } - - // Call if it was a function and we have a response - if ( responseContainer && jQuery.isFunction( overwritten ) ) { - overwritten( responseContainer[ 0 ] ); - } - - responseContainer = overwritten = undefined; - }); - - // Delegate to script - return "script"; - } -}); -// Install script dataType -jQuery.ajaxSetup({ - accepts: { - script: "text/javascript, application/javascript, application/ecmascript, application/x-ecmascript" - }, - contents: { - script: /javascript|ecmascript/ - }, - converters: { - "text script": function( text ) { - jQuery.globalEval( text ); - return text; - } - } -}); - -// Handle cache's special case and global -jQuery.ajaxPrefilter( "script", function( s ) { - if ( s.cache === undefined ) { - s.cache = false; - } - if ( s.crossDomain ) { - s.type = "GET"; - s.global = false; - } -}); - -// Bind script tag hack transport -jQuery.ajaxTransport( "script", function(s) { - - // This transport only deals with cross domain requests - if ( s.crossDomain ) { - - var script, - head = document.head || document.getElementsByTagName( "head" )[0] || document.documentElement; - - return { - - send: function( _, callback ) { - - script = document.createElement( "script" ); - - script.async = "async"; - - if ( s.scriptCharset ) { - script.charset = s.scriptCharset; - } - - script.src = s.url; - - // Attach handlers for all browsers - script.onload = script.onreadystatechange = function( _, isAbort ) { - - if ( isAbort || !script.readyState || /loaded|complete/.test( script.readyState ) ) { - - // Handle memory leak in IE - script.onload = script.onreadystatechange = null; - - // Remove the script - if ( head && script.parentNode ) { - head.removeChild( script ); - } - - // Dereference the script - script = undefined; - - // Callback if not abort - if ( !isAbort ) { - callback( 200, "success" ); - } - } - }; - // Use insertBefore instead of appendChild to circumvent an IE6 bug. - // This arises when a base node is used (#2709 and #4378). - head.insertBefore( script, head.firstChild ); - }, - - abort: function() { - if ( script ) { - script.onload( 0, 1 ); - } - } - }; - } -}); -var xhrCallbacks, - // #5280: Internet Explorer will keep connections alive if we don't abort on unload - xhrOnUnloadAbort = window.ActiveXObject ? 
function() { - // Abort all pending requests - for ( var key in xhrCallbacks ) { - xhrCallbacks[ key ]( 0, 1 ); - } - } : false, - xhrId = 0; - -// Functions to create xhrs -function createStandardXHR() { - try { - return new window.XMLHttpRequest(); - } catch( e ) {} -} - -function createActiveXHR() { - try { - return new window.ActiveXObject( "Microsoft.XMLHTTP" ); - } catch( e ) {} -} - -// Create the request object -// (This is still attached to ajaxSettings for backward compatibility) -jQuery.ajaxSettings.xhr = window.ActiveXObject ? - /* Microsoft failed to properly - * implement the XMLHttpRequest in IE7 (can't request local files), - * so we use the ActiveXObject when it is available - * Additionally XMLHttpRequest can be disabled in IE7/IE8 so - * we need a fallback. - */ - function() { - return !this.isLocal && createStandardXHR() || createActiveXHR(); - } : - // For all other browsers, use the standard XMLHttpRequest object - createStandardXHR; - -// Determine support properties -(function( xhr ) { - jQuery.extend( jQuery.support, { - ajax: !!xhr, - cors: !!xhr && ( "withCredentials" in xhr ) - }); -})( jQuery.ajaxSettings.xhr() ); - -// Create transport if the browser can provide an xhr -if ( jQuery.support.ajax ) { - - jQuery.ajaxTransport(function( s ) { - // Cross domain only allowed if supported through XMLHttpRequest - if ( !s.crossDomain || jQuery.support.cors ) { - - var callback; - - return { - send: function( headers, complete ) { - - // Get a new xhr - var handle, i, - xhr = s.xhr(); - - // Open the socket - // Passing null username, generates a login popup on Opera (#2865) - if ( s.username ) { - xhr.open( s.type, s.url, s.async, s.username, s.password ); - } else { - xhr.open( s.type, s.url, s.async ); - } - - // Apply custom fields if provided - if ( s.xhrFields ) { - for ( i in s.xhrFields ) { - xhr[ i ] = s.xhrFields[ i ]; - } - } - - // Override mime type if needed - if ( s.mimeType && xhr.overrideMimeType ) { - xhr.overrideMimeType( s.mimeType ); - } - - // X-Requested-With header - // For cross-domain requests, seeing as conditions for a preflight are - // akin to a jigsaw puzzle, we simply never set it to be sure. - // (it can always be set on a per-request basis or even using ajaxSetup) - // For same-domain requests, won't change header if already provided. 
- if ( !s.crossDomain && !headers["X-Requested-With"] ) { - headers[ "X-Requested-With" ] = "XMLHttpRequest"; - } - - // Need an extra try/catch for cross domain requests in Firefox 3 - try { - for ( i in headers ) { - xhr.setRequestHeader( i, headers[ i ] ); - } - } catch( _ ) {} - - // Do send the request - // This may raise an exception which is actually - // handled in jQuery.ajax (so no try/catch here) - xhr.send( ( s.hasContent && s.data ) || null ); - - // Listener - callback = function( _, isAbort ) { - - var status, - statusText, - responseHeaders, - responses, - xml; - - // Firefox throws exceptions when accessing properties - // of an xhr when a network error occurred - // http://helpful.knobs-dials.com/index.php/Component_returned_failure_code:_0x80040111_(NS_ERROR_NOT_AVAILABLE) - try { - - // Was never called and is aborted or complete - if ( callback && ( isAbort || xhr.readyState === 4 ) ) { - - // Only called once - callback = undefined; - - // Do not keep as active anymore - if ( handle ) { - xhr.onreadystatechange = jQuery.noop; - if ( xhrOnUnloadAbort ) { - delete xhrCallbacks[ handle ]; - } - } - - // If it's an abort - if ( isAbort ) { - // Abort it manually if needed - if ( xhr.readyState !== 4 ) { - xhr.abort(); - } - } else { - status = xhr.status; - responseHeaders = xhr.getAllResponseHeaders(); - responses = {}; - xml = xhr.responseXML; - - // Construct response list - if ( xml && xml.documentElement /* #4958 */ ) { - responses.xml = xml; - } - - // When requesting binary data, IE6-9 will throw an exception - // on any attempt to access responseText (#11426) - try { - responses.text = xhr.responseText; - } catch( e ) { - } - - // Firefox throws an exception when accessing - // statusText for faulty cross-domain requests - try { - statusText = xhr.statusText; - } catch( e ) { - // We normalize with Webkit giving an empty statusText - statusText = ""; - } - - // Filter status for non standard behaviors - - // If the request is local and we have data: assume a success - // (success with no data won't get notified, that's the best we - // can do given current implementations) - if ( !status && s.isLocal && !s.crossDomain ) { - status = responses.text ? 
200 : 404; - // IE - #1450: sometimes returns 1223 when it should be 204 - } else if ( status === 1223 ) { - status = 204; - } - } - } - } catch( firefoxAccessException ) { - if ( !isAbort ) { - complete( -1, firefoxAccessException ); - } - } - - // Call complete if needed - if ( responses ) { - complete( status, statusText, responses, responseHeaders ); - } - }; - - if ( !s.async ) { - // if we're in sync mode we fire the callback - callback(); - } else if ( xhr.readyState === 4 ) { - // (IE6 & IE7) if it's in cache and has been - // retrieved directly we need to fire the callback - setTimeout( callback, 0 ); - } else { - handle = ++xhrId; - if ( xhrOnUnloadAbort ) { - // Create the active xhrs callbacks list if needed - // and attach the unload handler - if ( !xhrCallbacks ) { - xhrCallbacks = {}; - jQuery( window ).unload( xhrOnUnloadAbort ); - } - // Add to list of active xhrs callbacks - xhrCallbacks[ handle ] = callback; - } - xhr.onreadystatechange = callback; - } - }, - - abort: function() { - if ( callback ) { - callback(0,1); - } - } - }; - } - }); -} -var fxNow, timerId, - rfxtypes = /^(?:toggle|show|hide)$/, - rfxnum = new RegExp( "^(?:([-+])=|)(" + core_pnum + ")([a-z%]*)$", "i" ), - rrun = /queueHooks$/, - animationPrefilters = [ defaultPrefilter ], - tweeners = { - "*": [function( prop, value ) { - var end, unit, - tween = this.createTween( prop, value ), - parts = rfxnum.exec( value ), - target = tween.cur(), - start = +target || 0, - scale = 1, - maxIterations = 20; - - if ( parts ) { - end = +parts[2]; - unit = parts[3] || ( jQuery.cssNumber[ prop ] ? "" : "px" ); - - // We need to compute starting value - if ( unit !== "px" && start ) { - // Iteratively approximate from a nonzero starting point - // Prefer the current property, because this process will be trivial if it uses the same units - // Fallback to end or a simple constant - start = jQuery.css( tween.elem, prop, true ) || end || 1; - - do { - // If previous iteration zeroed out, double until we get *something* - // Use a string for doubling factor so we don't accidentally see scale as unchanged below - scale = scale || ".5"; - - // Adjust and apply - start = start / scale; - jQuery.style( tween.elem, prop, start + unit ); - - // Update scale, tolerating zero or NaN from tween.cur() - // And breaking the loop if scale is unchanged or perfect, or if we've just had enough - } while ( scale !== (scale = tween.cur() / target) && scale !== 1 && --maxIterations ); - } - - tween.unit = unit; - tween.start = start; - // If a +=/-= token was provided, we're doing a relative animation - tween.end = parts[1] ? 
start + ( parts[1] + 1 ) * end : end; - } - return tween; - }] - }; - -// Animations created synchronously will run synchronously -function createFxNow() { - setTimeout(function() { - fxNow = undefined; - }, 0 ); - return ( fxNow = jQuery.now() ); -} - -function createTweens( animation, props ) { - jQuery.each( props, function( prop, value ) { - var collection = ( tweeners[ prop ] || [] ).concat( tweeners[ "*" ] ), - index = 0, - length = collection.length; - for ( ; index < length; index++ ) { - if ( collection[ index ].call( animation, prop, value ) ) { - - // we're done with this property - return; - } - } - }); -} - -function Animation( elem, properties, options ) { - var result, - index = 0, - tweenerIndex = 0, - length = animationPrefilters.length, - deferred = jQuery.Deferred().always( function() { - // don't match elem in the :animated selector - delete tick.elem; - }), - tick = function() { - var currentTime = fxNow || createFxNow(), - remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), - // archaic crash bug won't allow us to use 1 - ( 0.5 || 0 ) (#12497) - temp = remaining / animation.duration || 0, - percent = 1 - temp, - index = 0, - length = animation.tweens.length; - - for ( ; index < length ; index++ ) { - animation.tweens[ index ].run( percent ); - } - - deferred.notifyWith( elem, [ animation, percent, remaining ]); - - if ( percent < 1 && length ) { - return remaining; - } else { - deferred.resolveWith( elem, [ animation ] ); - return false; - } - }, - animation = deferred.promise({ - elem: elem, - props: jQuery.extend( {}, properties ), - opts: jQuery.extend( true, { specialEasing: {} }, options ), - originalProperties: properties, - originalOptions: options, - startTime: fxNow || createFxNow(), - duration: options.duration, - tweens: [], - createTween: function( prop, end, easing ) { - var tween = jQuery.Tween( elem, animation.opts, prop, end, - animation.opts.specialEasing[ prop ] || animation.opts.easing ); - animation.tweens.push( tween ); - return tween; - }, - stop: function( gotoEnd ) { - var index = 0, - // if we are going to the end, we want to run all the tweens - // otherwise we skip this part - length = gotoEnd ? 
animation.tweens.length : 0; - - for ( ; index < length ; index++ ) { - animation.tweens[ index ].run( 1 ); - } - - // resolve when we played the last frame - // otherwise, reject - if ( gotoEnd ) { - deferred.resolveWith( elem, [ animation, gotoEnd ] ); - } else { - deferred.rejectWith( elem, [ animation, gotoEnd ] ); - } - return this; - } - }), - props = animation.props; - - propFilter( props, animation.opts.specialEasing ); - - for ( ; index < length ; index++ ) { - result = animationPrefilters[ index ].call( animation, elem, props, animation.opts ); - if ( result ) { - return result; - } - } - - createTweens( animation, props ); - - if ( jQuery.isFunction( animation.opts.start ) ) { - animation.opts.start.call( elem, animation ); - } - - jQuery.fx.timer( - jQuery.extend( tick, { - anim: animation, - queue: animation.opts.queue, - elem: elem - }) - ); - - // attach callbacks from options - return animation.progress( animation.opts.progress ) - .done( animation.opts.done, animation.opts.complete ) - .fail( animation.opts.fail ) - .always( animation.opts.always ); -} - -function propFilter( props, specialEasing ) { - var index, name, easing, value, hooks; - - // camelCase, specialEasing and expand cssHook pass - for ( index in props ) { - name = jQuery.camelCase( index ); - easing = specialEasing[ name ]; - value = props[ index ]; - if ( jQuery.isArray( value ) ) { - easing = value[ 1 ]; - value = props[ index ] = value[ 0 ]; - } - - if ( index !== name ) { - props[ name ] = value; - delete props[ index ]; - } - - hooks = jQuery.cssHooks[ name ]; - if ( hooks && "expand" in hooks ) { - value = hooks.expand( value ); - delete props[ name ]; - - // not quite $.extend, this wont overwrite keys already present. - // also - reusing 'index' from above because we have the correct "name" - for ( index in value ) { - if ( !( index in props ) ) { - props[ index ] = value[ index ]; - specialEasing[ index ] = easing; - } - } - } else { - specialEasing[ name ] = easing; - } - } -} - -jQuery.Animation = jQuery.extend( Animation, { - - tweener: function( props, callback ) { - if ( jQuery.isFunction( props ) ) { - callback = props; - props = [ "*" ]; - } else { - props = props.split(" "); - } - - var prop, - index = 0, - length = props.length; - - for ( ; index < length ; index++ ) { - prop = props[ index ]; - tweeners[ prop ] = tweeners[ prop ] || []; - tweeners[ prop ].unshift( callback ); - } - }, - - prefilter: function( callback, prepend ) { - if ( prepend ) { - animationPrefilters.unshift( callback ); - } else { - animationPrefilters.push( callback ); - } - } -}); - -function defaultPrefilter( elem, props, opts ) { - var index, prop, value, length, dataShow, toggle, tween, hooks, oldfire, - anim = this, - style = elem.style, - orig = {}, - handled = [], - hidden = elem.nodeType && isHidden( elem ); - - // handle queue: false promises - if ( !opts.queue ) { - hooks = jQuery._queueHooks( elem, "fx" ); - if ( hooks.unqueued == null ) { - hooks.unqueued = 0; - oldfire = hooks.empty.fire; - hooks.empty.fire = function() { - if ( !hooks.unqueued ) { - oldfire(); - } - }; - } - hooks.unqueued++; - - anim.always(function() { - // doing this makes sure that the complete handler will be called - // before this completes - anim.always(function() { - hooks.unqueued--; - if ( !jQuery.queue( elem, "fx" ).length ) { - hooks.empty.fire(); - } - }); - }); - } - - // height/width overflow pass - if ( elem.nodeType === 1 && ( "height" in props || "width" in props ) ) { - // Make sure that nothing sneaks out - // 
Record all 3 overflow attributes because IE does not - // change the overflow attribute when overflowX and - // overflowY are set to the same value - opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; - - // Set display property to inline-block for height/width - // animations on inline elements that are having width/height animated - if ( jQuery.css( elem, "display" ) === "inline" && - jQuery.css( elem, "float" ) === "none" ) { - - // inline-level elements accept inline-block; - // block-level elements need to be inline with layout - if ( !jQuery.support.inlineBlockNeedsLayout || css_defaultDisplay( elem.nodeName ) === "inline" ) { - style.display = "inline-block"; - - } else { - style.zoom = 1; - } - } - } - - if ( opts.overflow ) { - style.overflow = "hidden"; - if ( !jQuery.support.shrinkWrapBlocks ) { - anim.done(function() { - style.overflow = opts.overflow[ 0 ]; - style.overflowX = opts.overflow[ 1 ]; - style.overflowY = opts.overflow[ 2 ]; - }); - } - } - - - // show/hide pass - for ( index in props ) { - value = props[ index ]; - if ( rfxtypes.exec( value ) ) { - delete props[ index ]; - toggle = toggle || value === "toggle"; - if ( value === ( hidden ? "hide" : "show" ) ) { - continue; - } - handled.push( index ); - } - } - - length = handled.length; - if ( length ) { - dataShow = jQuery._data( elem, "fxshow" ) || jQuery._data( elem, "fxshow", {} ); - if ( "hidden" in dataShow ) { - hidden = dataShow.hidden; - } - - // store state if its toggle - enables .stop().toggle() to "reverse" - if ( toggle ) { - dataShow.hidden = !hidden; - } - if ( hidden ) { - jQuery( elem ).show(); - } else { - anim.done(function() { - jQuery( elem ).hide(); - }); - } - anim.done(function() { - var prop; - jQuery.removeData( elem, "fxshow", true ); - for ( prop in orig ) { - jQuery.style( elem, prop, orig[ prop ] ); - } - }); - for ( index = 0 ; index < length ; index++ ) { - prop = handled[ index ]; - tween = anim.createTween( prop, hidden ? dataShow[ prop ] : 0 ); - orig[ prop ] = dataShow[ prop ] || jQuery.style( elem, prop ); - - if ( !( prop in dataShow ) ) { - dataShow[ prop ] = tween.start; - if ( hidden ) { - tween.end = tween.start; - tween.start = prop === "width" || prop === "height" ? 1 : 0; - } - } - } - } -} - -function Tween( elem, options, prop, end, easing ) { - return new Tween.prototype.init( elem, options, prop, end, easing ); -} -jQuery.Tween = Tween; - -Tween.prototype = { - constructor: Tween, - init: function( elem, options, prop, end, easing, unit ) { - this.elem = elem; - this.prop = prop; - this.easing = easing || "swing"; - this.options = options; - this.start = this.now = this.cur(); - this.end = end; - this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" ); - }, - cur: function() { - var hooks = Tween.propHooks[ this.prop ]; - - return hooks && hooks.get ? 
- hooks.get( this ) : - Tween.propHooks._default.get( this ); - }, - run: function( percent ) { - var eased, - hooks = Tween.propHooks[ this.prop ]; - - if ( this.options.duration ) { - this.pos = eased = jQuery.easing[ this.easing ]( - percent, this.options.duration * percent, 0, 1, this.options.duration - ); - } else { - this.pos = eased = percent; - } - this.now = ( this.end - this.start ) * eased + this.start; - - if ( this.options.step ) { - this.options.step.call( this.elem, this.now, this ); - } - - if ( hooks && hooks.set ) { - hooks.set( this ); - } else { - Tween.propHooks._default.set( this ); - } - return this; - } -}; - -Tween.prototype.init.prototype = Tween.prototype; - -Tween.propHooks = { - _default: { - get: function( tween ) { - var result; - - if ( tween.elem[ tween.prop ] != null && - (!tween.elem.style || tween.elem.style[ tween.prop ] == null) ) { - return tween.elem[ tween.prop ]; - } - - // passing any value as a 4th parameter to .css will automatically - // attempt a parseFloat and fallback to a string if the parse fails - // so, simple values such as "10px" are parsed to Float. - // complex values such as "rotate(1rad)" are returned as is. - result = jQuery.css( tween.elem, tween.prop, false, "" ); - // Empty strings, null, undefined and "auto" are converted to 0. - return !result || result === "auto" ? 0 : result; - }, - set: function( tween ) { - // use step hook for back compat - use cssHook if its there - use .style if its - // available and use plain properties where available - if ( jQuery.fx.step[ tween.prop ] ) { - jQuery.fx.step[ tween.prop ]( tween ); - } else if ( tween.elem.style && ( tween.elem.style[ jQuery.cssProps[ tween.prop ] ] != null || jQuery.cssHooks[ tween.prop ] ) ) { - jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); - } else { - tween.elem[ tween.prop ] = tween.now; - } - } - } -}; - -// Remove in 2.0 - this supports IE8's panic based approach -// to setting things on disconnected nodes - -Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { - set: function( tween ) { - if ( tween.elem.nodeType && tween.elem.parentNode ) { - tween.elem[ tween.prop ] = tween.now; - } - } -}; - -jQuery.each([ "toggle", "show", "hide" ], function( i, name ) { - var cssFn = jQuery.fn[ name ]; - jQuery.fn[ name ] = function( speed, easing, callback ) { - return speed == null || typeof speed === "boolean" || - // special check for .toggle( handler, handler, ... ) - ( !i && jQuery.isFunction( speed ) && jQuery.isFunction( easing ) ) ? - cssFn.apply( this, arguments ) : - this.animate( genFx( name, true ), speed, easing, callback ); - }; -}); - -jQuery.fn.extend({ - fadeTo: function( speed, to, easing, callback ) { - - // show any hidden elements after setting opacity to 0 - return this.filter( isHidden ).css( "opacity", 0 ).show() - - // animate to the value specified - .end().animate({ opacity: to }, speed, easing, callback ); - }, - animate: function( prop, speed, easing, callback ) { - var empty = jQuery.isEmptyObject( prop ), - optall = jQuery.speed( speed, easing, callback ), - doAnimation = function() { - // Operate on a copy of prop so per-property easing won't be lost - var anim = Animation( this, jQuery.extend( {}, prop ), optall ); - - // Empty animations resolve immediately - if ( empty ) { - anim.stop( true ); - } - }; - - return empty || optall.queue === false ? 
- this.each( doAnimation ) : - this.queue( optall.queue, doAnimation ); - }, - stop: function( type, clearQueue, gotoEnd ) { - var stopQueue = function( hooks ) { - var stop = hooks.stop; - delete hooks.stop; - stop( gotoEnd ); - }; - - if ( typeof type !== "string" ) { - gotoEnd = clearQueue; - clearQueue = type; - type = undefined; - } - if ( clearQueue && type !== false ) { - this.queue( type || "fx", [] ); - } - - return this.each(function() { - var dequeue = true, - index = type != null && type + "queueHooks", - timers = jQuery.timers, - data = jQuery._data( this ); - - if ( index ) { - if ( data[ index ] && data[ index ].stop ) { - stopQueue( data[ index ] ); - } - } else { - for ( index in data ) { - if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { - stopQueue( data[ index ] ); - } - } - } - - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && (type == null || timers[ index ].queue === type) ) { - timers[ index ].anim.stop( gotoEnd ); - dequeue = false; - timers.splice( index, 1 ); - } - } - - // start the next in the queue if the last step wasn't forced - // timers currently will call their complete callbacks, which will dequeue - // but only if they were gotoEnd - if ( dequeue || !gotoEnd ) { - jQuery.dequeue( this, type ); - } - }); - } -}); - -// Generate parameters to create a standard animation -function genFx( type, includeWidth ) { - var which, - attrs = { height: type }, - i = 0; - - // if we include width, step value is 1 to do all cssExpand values, - // if we don't include width, step value is 2 to skip over Left and Right - includeWidth = includeWidth? 1 : 0; - for( ; i < 4 ; i += 2 - includeWidth ) { - which = cssExpand[ i ]; - attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; - } - - if ( includeWidth ) { - attrs.opacity = attrs.width = type; - } - - return attrs; -} - -// Generate shortcuts for custom animations -jQuery.each({ - slideDown: genFx("show"), - slideUp: genFx("hide"), - slideToggle: genFx("toggle"), - fadeIn: { opacity: "show" }, - fadeOut: { opacity: "hide" }, - fadeToggle: { opacity: "toggle" } -}, function( name, props ) { - jQuery.fn[ name ] = function( speed, easing, callback ) { - return this.animate( props, speed, easing, callback ); - }; -}); - -jQuery.speed = function( speed, easing, fn ) { - var opt = speed && typeof speed === "object" ? jQuery.extend( {}, speed ) : { - complete: fn || !fn && easing || - jQuery.isFunction( speed ) && speed, - duration: speed, - easing: fn && easing || easing && !jQuery.isFunction( easing ) && easing - }; - - opt.duration = jQuery.fx.off ? 0 : typeof opt.duration === "number" ? opt.duration : - opt.duration in jQuery.fx.speeds ? 
jQuery.fx.speeds[ opt.duration ] : jQuery.fx.speeds._default; - - // normalize opt.queue - true/undefined/null -> "fx" - if ( opt.queue == null || opt.queue === true ) { - opt.queue = "fx"; - } - - // Queueing - opt.old = opt.complete; - - opt.complete = function() { - if ( jQuery.isFunction( opt.old ) ) { - opt.old.call( this ); - } - - if ( opt.queue ) { - jQuery.dequeue( this, opt.queue ); - } - }; - - return opt; -}; - -jQuery.easing = { - linear: function( p ) { - return p; - }, - swing: function( p ) { - return 0.5 - Math.cos( p*Math.PI ) / 2; - } -}; - -jQuery.timers = []; -jQuery.fx = Tween.prototype.init; -jQuery.fx.tick = function() { - var timer, - timers = jQuery.timers, - i = 0; - - fxNow = jQuery.now(); - - for ( ; i < timers.length; i++ ) { - timer = timers[ i ]; - // Checks the timer has not already been removed - if ( !timer() && timers[ i ] === timer ) { - timers.splice( i--, 1 ); - } - } - - if ( !timers.length ) { - jQuery.fx.stop(); - } - fxNow = undefined; -}; - -jQuery.fx.timer = function( timer ) { - if ( timer() && jQuery.timers.push( timer ) && !timerId ) { - timerId = setInterval( jQuery.fx.tick, jQuery.fx.interval ); - } -}; - -jQuery.fx.interval = 13; - -jQuery.fx.stop = function() { - clearInterval( timerId ); - timerId = null; -}; - -jQuery.fx.speeds = { - slow: 600, - fast: 200, - // Default speed - _default: 400 -}; - -// Back Compat <1.8 extension point -jQuery.fx.step = {}; - -if ( jQuery.expr && jQuery.expr.filters ) { - jQuery.expr.filters.animated = function( elem ) { - return jQuery.grep(jQuery.timers, function( fn ) { - return elem === fn.elem; - }).length; - }; -} -var rroot = /^(?:body|html)$/i; - -jQuery.fn.offset = function( options ) { - if ( arguments.length ) { - return options === undefined ? - this : - this.each(function( i ) { - jQuery.offset.setOffset( this, options, i ); - }); - } - - var docElem, body, win, clientTop, clientLeft, scrollTop, scrollLeft, - box = { top: 0, left: 0 }, - elem = this[ 0 ], - doc = elem && elem.ownerDocument; - - if ( !doc ) { - return; - } - - if ( (body = doc.body) === elem ) { - return jQuery.offset.bodyOffset( elem ); - } - - docElem = doc.documentElement; - - // Make sure it's not a disconnected DOM node - if ( !jQuery.contains( docElem, elem ) ) { - return box; - } - - // If we don't have gBCR, just use 0,0 rather than error - // BlackBerry 5, iOS 3 (original iPhone) - if ( typeof elem.getBoundingClientRect !== "undefined" ) { - box = elem.getBoundingClientRect(); - } - win = getWindow( doc ); - clientTop = docElem.clientTop || body.clientTop || 0; - clientLeft = docElem.clientLeft || body.clientLeft || 0; - scrollTop = win.pageYOffset || docElem.scrollTop; - scrollLeft = win.pageXOffset || docElem.scrollLeft; - return { - top: box.top + scrollTop - clientTop, - left: box.left + scrollLeft - clientLeft - }; -}; - -jQuery.offset = { - - bodyOffset: function( body ) { - var top = body.offsetTop, - left = body.offsetLeft; - - if ( jQuery.support.doesNotIncludeMarginInBodyOffset ) { - top += parseFloat( jQuery.css(body, "marginTop") ) || 0; - left += parseFloat( jQuery.css(body, "marginLeft") ) || 0; - } - - return { top: top, left: left }; - }, - - setOffset: function( elem, options, i ) { - var position = jQuery.css( elem, "position" ); - - // set position first, in-case top/left are set even on static elem - if ( position === "static" ) { - elem.style.position = "relative"; - } - - var curElem = jQuery( elem ), - curOffset = curElem.offset(), - curCSSTop = jQuery.css( elem, "top" ), - curCSSLeft = 
jQuery.css( elem, "left" ), - calculatePosition = ( position === "absolute" || position === "fixed" ) && jQuery.inArray("auto", [curCSSTop, curCSSLeft]) > -1, - props = {}, curPosition = {}, curTop, curLeft; - - // need to be able to calculate position if either top or left is auto and position is either absolute or fixed - if ( calculatePosition ) { - curPosition = curElem.position(); - curTop = curPosition.top; - curLeft = curPosition.left; - } else { - curTop = parseFloat( curCSSTop ) || 0; - curLeft = parseFloat( curCSSLeft ) || 0; - } - - if ( jQuery.isFunction( options ) ) { - options = options.call( elem, i, curOffset ); - } - - if ( options.top != null ) { - props.top = ( options.top - curOffset.top ) + curTop; - } - if ( options.left != null ) { - props.left = ( options.left - curOffset.left ) + curLeft; - } - - if ( "using" in options ) { - options.using.call( elem, props ); - } else { - curElem.css( props ); - } - } -}; - - -jQuery.fn.extend({ - - position: function() { - if ( !this[0] ) { - return; - } - - var elem = this[0], - - // Get *real* offsetParent - offsetParent = this.offsetParent(), - - // Get correct offsets - offset = this.offset(), - parentOffset = rroot.test(offsetParent[0].nodeName) ? { top: 0, left: 0 } : offsetParent.offset(); - - // Subtract element margins - // note: when an element has margin: auto the offsetLeft and marginLeft - // are the same in Safari causing offset.left to incorrectly be 0 - offset.top -= parseFloat( jQuery.css(elem, "marginTop") ) || 0; - offset.left -= parseFloat( jQuery.css(elem, "marginLeft") ) || 0; - - // Add offsetParent borders - parentOffset.top += parseFloat( jQuery.css(offsetParent[0], "borderTopWidth") ) || 0; - parentOffset.left += parseFloat( jQuery.css(offsetParent[0], "borderLeftWidth") ) || 0; - - // Subtract the two offsets - return { - top: offset.top - parentOffset.top, - left: offset.left - parentOffset.left - }; - }, - - offsetParent: function() { - return this.map(function() { - var offsetParent = this.offsetParent || document.body; - while ( offsetParent && (!rroot.test(offsetParent.nodeName) && jQuery.css(offsetParent, "position") === "static") ) { - offsetParent = offsetParent.offsetParent; - } - return offsetParent || document.body; - }); - } -}); - - -// Create scrollLeft and scrollTop methods -jQuery.each( {scrollLeft: "pageXOffset", scrollTop: "pageYOffset"}, function( method, prop ) { - var top = /Y/.test( prop ); - - jQuery.fn[ method ] = function( val ) { - return jQuery.access( this, function( elem, method, val ) { - var win = getWindow( elem ); - - if ( val === undefined ) { - return win ? (prop in win) ? win[ prop ] : - win.document.documentElement[ method ] : - elem[ method ]; - } - - if ( win ) { - win.scrollTo( - !top ? val : jQuery( win ).scrollLeft(), - top ? val : jQuery( win ).scrollTop() - ); - - } else { - elem[ method ] = val; - } - }, method, val, arguments.length, null ); - }; -}); - -function getWindow( elem ) { - return jQuery.isWindow( elem ) ? - elem : - elem.nodeType === 9 ? 
- elem.defaultView || elem.parentWindow : - false; -} -// Create innerHeight, innerWidth, height, width, outerHeight and outerWidth methods -jQuery.each( { Height: "height", Width: "width" }, function( name, type ) { - jQuery.each( { padding: "inner" + name, content: type, "": "outer" + name }, function( defaultExtra, funcName ) { - // margin is only for outerHeight, outerWidth - jQuery.fn[ funcName ] = function( margin, value ) { - var chainable = arguments.length && ( defaultExtra || typeof margin !== "boolean" ), - extra = defaultExtra || ( margin === true || value === true ? "margin" : "border" ); - - return jQuery.access( this, function( elem, type, value ) { - var doc; - - if ( jQuery.isWindow( elem ) ) { - // As of 5/8/2012 this will yield incorrect results for Mobile Safari, but there - // isn't a whole lot we can do. See pull request at this URL for discussion: - // https://github.com/jquery/jquery/pull/764 - return elem.document.documentElement[ "client" + name ]; - } - - // Get document width or height - if ( elem.nodeType === 9 ) { - doc = elem.documentElement; - - // Either scroll[Width/Height] or offset[Width/Height] or client[Width/Height], whichever is greatest - // unfortunately, this causes bug #3838 in IE6/8 only, but there is currently no good, small way to fix it. - return Math.max( - elem.body[ "scroll" + name ], doc[ "scroll" + name ], - elem.body[ "offset" + name ], doc[ "offset" + name ], - doc[ "client" + name ] - ); - } - - return value === undefined ? - // Get width or height on the element, requesting but not forcing parseFloat - jQuery.css( elem, type, value, extra ) : - - // Set width or height on the element - jQuery.style( elem, type, value, extra ); - }, type, chainable ? margin : undefined, chainable, null ); - }; - }); -}); -// Expose jQuery to the global object -window.jQuery = window.$ = jQuery; - -// Expose jQuery as an AMD module, but only for AMD loaders that -// understand the issues with loading multiple versions of jQuery -// in a page that all might call define(). The loader will indicate -// they have special allowances for multiple jQuery versions by -// specifying define.amd.jQuery = true. Register as a named module, -// since jQuery can be concatenated with other files that may use define, -// but not use a proper concatenation script that understands anonymous -// AMD modules. A named AMD is safest and most robust way to register. -// Lowercase jquery is used because AMD module names are derived from -// file names, and jQuery is normally delivered in a lowercase file name. -// Do this after creating the global so that if an AMD module wants to call -// noConflict to hide this version of jQuery, it will work. -if ( typeof define === "function" && define.amd && define.amd.jQuery ) { - define( "jquery", [], function () { return jQuery; } ); -} - -})( window ); diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.mousewheel.js b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.mousewheel.js deleted file mode 100644 index a999c63..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/js/jquery.mousewheel.js +++ /dev/null @@ -1,86 +0,0 @@ -// Source: https://github.com/jquery/jquery-mousewheel/blob/a06ef4e1a127795606642c55e22d4f2945edc061/jquery.mousewheel.js - -/*! Copyright (c) 2011 Brandon Aaron (http://brandonaaron.net) - * Licensed under the MIT License (LICENSE.txt). - * - * Thanks to: http://adomas.org/javascript-mouse-wheel/ for some pointers. 
- * Thanks to: Mathias Bank(http://www.mathias-bank.de) for a scope bug fix. - * Thanks to: Seamus Leahy for adding deltaX and deltaY - * - * Version: 3.0.6 - * - * Requires: 1.2.2+ - */ - -(function($) { - -var types = ['DOMMouseScroll', 'mousewheel']; - -if ($.event.fixHooks) { - for ( var i=types.length; i; ) { - $.event.fixHooks[ types[--i] ] = $.event.mouseHooks; - } -} - -$.event.special.mousewheel = { - setup: function() { - if ( this.addEventListener ) { - for ( var i=types.length; i; ) { - this.addEventListener( types[--i], handler, false ); - } - } else { - this.onmousewheel = handler; - } - }, - - teardown: function() { - if ( this.removeEventListener ) { - for ( var i=types.length; i; ) { - this.removeEventListener( types[--i], handler, false ); - } - } else { - this.onmousewheel = null; - } - } -}; - -$.fn.extend({ - mousewheel: function(fn) { - return fn ? this.bind("mousewheel", fn) : this.trigger("mousewheel"); - }, - - unmousewheel: function(fn) { - return this.unbind("mousewheel", fn); - } -}); - - -function handler(event) { - var orgEvent = event || window.event, args = [].slice.call( arguments, 1 ), delta = 0, returnValue = true, deltaX = 0, deltaY = 0; - event = $.event.fix(orgEvent); - event.type = "mousewheel"; - - // Old school scrollwheel delta - if ( orgEvent.wheelDelta ) { delta = orgEvent.wheelDelta/120; } - if ( orgEvent.detail ) { delta = -orgEvent.detail/3; } - - // New school multidimensional scroll (touchpads) deltas - deltaY = delta; - - // Gecko - if ( orgEvent.axis !== undefined && orgEvent.axis === orgEvent.HORIZONTAL_AXIS ) { - deltaY = 0; - deltaX = -1*delta; - } - - // Webkit - if ( orgEvent.wheelDeltaY !== undefined ) { deltaY = orgEvent.wheelDeltaY/120; } - if ( orgEvent.wheelDeltaX !== undefined ) { deltaX = -1*orgEvent.wheelDeltaX/120; } - - // Add event and delta to the front of the arguments - args.unshift(event, delta, deltaX, deltaY); - - return ($.event.dispatch || $.event.handle).apply(this, args); -} - -})(jQuery); diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/make_chart_data.py b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/make_chart_data.py deleted file mode 100644 index dc15f72..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/make_chart_data.py +++ /dev/null @@ -1,151 +0,0 @@ -# random,1310720,google_dense_hash_map,45621248,0.344362020493 -# random,2621440,glib_hash_table,109867008,1.01163601875 -# random,2621440,stl_unordered_map,130715648,1.73484396935 -# random,2621440,boost_unordered_map,108380160,1.11585187912 -# random,2621440,google_sparse_hash_map,37015552,1.76031804085 -# random,2621440,google_dense_hash_map,79175680,0.504401922226 -# random,5242880,glib_hash_table,210530304,1.86031603813 -# random,5242880,stl_unordered_map,250298368,3.81597208977 -# random,5242880,boost_unordered_map,192184320,2.63760495186 -# random,5242880,google_sparse_hash_map,62066688,3.93570995331 -# random,5242880,google_dense_hash_map,146284544,1.22620105743 -# random,10485760,glib_hash_table,411856896,4.16937494278 -# random,10485760,stl_unordered_map,490430464,7.91806197166 -# random,10485760,boost_unordered_map,359251968,7.52085900307 -# random,10485760,google_sparse_hash_map,111902720,8.11318516731 -# random,10485760,google_dense_hash_map,280502272,2.32930994034 -# random,20971520,glib_hash_table,814510080,8.32456207275 -# random,20971520,stl_unordered_map,971583488,16.1606841087 -# random,20971520,boost_unordered_map,692441088,24.5845990181 -# 
random,20971520,google_sparse_hash_map,211435520,16.2772600651 -# random,20971520,google_dense_hash_map,548937728,4.85360789299 -# random,41943040,glib_hash_table,1619816448,90.6313672066 - -import sys, json - -lines = [ line.strip() for line in sys.stdin if line.strip() ] - -by_benchtype = {} -benches = {} -programs = {} - -for line in lines: - benchtype, type, nkeys, program, value = line.split(',') - nkeys = int(nkeys) - programs[program] = 1 - - if (type == 'time'): - by_benchtype.setdefault("%s-runtime" % benchtype, {}).setdefault(program, []).append([nkeys, float(value)]) - else: - by_benchtype.setdefault("%s-memory" % benchtype, {}).setdefault(program, []).append([nkeys, int(value)]) - benches[benchtype] = 1 - -proper_names = { - 'std::unordered_map': 'std::unordered_map (1 thread)', - 'spp::sparse_hash_map': 'sparsepp (1 thread, use_spp_alloc)', - 'absl::flat_hash_map': 'absl::flat_hash_map (1 thread)', - 'phmap::flat_hash_map': 'phmap::flat_hash_map', - 'phmap::parallel_flat_hash_map': 'phmap::parallel_flat_hash_map (1 thread)', - 'phmap::parallel_flat_hash_map_mt': 'phmap::parallel_flat_hash_map (8 thread)', - 'absl::parallel_flat_hash_map': 'absl::parallel_flat_hash_map (1 thread)', - 'absl::parallel_flat_hash_map_mt': 'absl::parallel_flat_hash_map (8 threads)', - 'phmap::parallel_flat_hash_map_4': 'phmap::parallel_flat_hash_map (N=4, 8 threads)', - 'phmap::parallel_flat_hash_map_5': 'phmap::parallel_flat_hash_map (N=5, 8 threads)', - 'phmap::parallel_flat_hash_map_6': 'phmap::parallel_flat_hash_map (N=6, 8 threads)' -} - -proper_color = { - 'std::unordered_map': 0, - 'spp::sparse_hash_map': 0, - 'absl::flat_hash_map': 1, - 'phmap::flat_hash_map': 1, - 'phmap::parallel_flat_hash_map': 2, - 'phmap::parallel_flat_hash_map_mt': 2, - 'absl::parallel_flat_hash_map': 3, - 'absl::parallel_flat_hash_map_mt': 3, - 'phmap::parallel_flat_hash_map_4': 3, - 'phmap::parallel_flat_hash_map_5': 4, - 'phmap::parallel_flat_hash_map_6': 5 -} - -bench_titles = { - 'lookup': 'Random Lookup', - 'sequential' : 'Sequential Insert', - 'random' : 'Random Insert', - 'delete' : 'Deletion', - 'sequentialstring' : 'Sequential String Insert', - 'randomstring' : 'Random String Insert', - 'deletestring' : 'String Deletion' - } - -# do them in the desired order to make the legend not overlap the chart data -# too much -program_slugs = [ - 'std::unordered_map', - 'sparsepp', - 'absl::flat_hash_map', - 'phmap::flat_hash_map', - 'absl::parallel_flat_hash_map', - 'phmap::parallel_flat_hash_map', - 'phmap::parallel_flat_hash_map_mt', - 'absl::parallel_flat_hash_map_mt', - 'phmap::parallel_flat_hash_map_4', - 'phmap::parallel_flat_hash_map_5', - 'phmap::parallel_flat_hash_map_6' -] - -chart_data = {} - -for i, (benchtype, programs) in enumerate(by_benchtype.items()): - chart_data[benchtype] = [] - k = programs.keys() - k.sort() - for program in k: - data = programs.get(program, []) - chart_data[benchtype].append({ - 'label': proper_names[program], - 'color': proper_color[program], - 'data': [], - }) - - for k, (nkeys, value) in enumerate(data): - chart_data[benchtype][-1]['data'].append([nkeys, value]) - -html_chart_data = 'chart_data = ' + json.dumps(chart_data) - -## print chart_data['delete-runtime'] - -html_plot_spec = '' -for b in benches.keys(): - html_plot_spec += """ - $.plot($("#{0}-runtime"), chart_data['{0}-runtime'], runtime_settings); - $.plot($("#{0}-memory"), chart_data['{0}-memory'], memory_settings);""".format(b) - -html_div_spec = '' -first = 1 - -for b in benches.keys(): - if 1: - first = 0 - 
html_div_spec += """ -
{1} (integers): Memory Usage
-
-
number of entries in hash table
-""".format(b, bench_titles[b]) - - html_div_spec += """ -
{1} (integers): Execution Time
-
-
number of entries in hash table
- -""".format(b, bench_titles[b]) - - - -html_template = file('charts-template.html', 'r').read() - -html_template = html_template.replace('__CHART_DATA_GOES_HERE__', html_chart_data) -html_template = html_template.replace('__PLOT_SPEC_GOES_HERE__', html_plot_spec) -html_template = html_template.replace('__PLOT_DIV_SPEC_GOES_HERE__', html_div_spec) - -file('charts.html', 'w').write(html_template) diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/results/output_flat_par b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/results/output_flat_par deleted file mode 100644 index dbd9c01..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/results/output_flat_par +++ /dev/null @@ -1,320 +0,0 @@ -random,memory,0,phmap::flat_hash_map,15740928 -random,memory,240967,phmap::flat_hash_map,11272192 -random,memory,458751,phmap::flat_hash_map,11272192 -random,memory,458752,phmap::flat_hash_map,29138944 -random,memory,493930,phmap::flat_hash_map,20205568 -random,memory,678845,phmap::flat_hash_map,19087360 -random,memory,917503,phmap::flat_hash_map,19087360 -random,memory,917504,phmap::flat_hash_map,54812672 -random,memory,930488,phmap::flat_hash_map,36945920 -random,memory,1835007,phmap::flat_hash_map,36945920 -random,memory,1835008,phmap::flat_hash_map,108392448 -random,memory,1860950,phmap::flat_hash_map,72667136 -random,memory,3670015,phmap::flat_hash_map,72667136 -random,memory,3670016,phmap::flat_hash_map,215556096 -random,memory,3681610,phmap::flat_hash_map,144109568 -random,memory,7340031,phmap::flat_hash_map,144109568 -random,memory,7340032,phmap::flat_hash_map,429883392 -random,memory,7348393,phmap::flat_hash_map,286994432 -random,time,10000000,phmap::flat_hash_map,0.917000 -random,memory,10000174,phmap::flat_hash_map,286994432 -random,memory,14680063,phmap::flat_hash_map,286994432 -random,memory,14680064,phmap::flat_hash_map,858537984 -random,memory,14680512,phmap::flat_hash_map,572764160 -random,time,20000000,phmap::flat_hash_map,2.199000 -random,memory,20013320,phmap::flat_hash_map,572764160 -random,memory,29360127,phmap::flat_hash_map,572764160 -random,memory,29360128,phmap::flat_hash_map,1715859456 -random,memory,29369210,phmap::flat_hash_map,1144307712 -random,time,30000000,phmap::flat_hash_map,3.818000 -random,memory,30002294,phmap::flat_hash_map,1144307712 -random,time,40000000,phmap::flat_hash_map,5.070000 -random,memory,40010797,phmap::flat_hash_map,1144307712 -random,time,50000000,phmap::flat_hash_map,6.323000 -random,memory,50009729,phmap::flat_hash_map,1144307712 -random,memory,58720255,phmap::flat_hash_map,1144307712 -random,memory,58720256,phmap::flat_hash_map,3430486016 -random,memory,58721491,phmap::flat_hash_map,2287403008 -random,time,60000000,phmap::flat_hash_map,8.700000 -random,memory,60002661,phmap::flat_hash_map,2287403008 -random,time,70000000,phmap::flat_hash_map,10.155000 -random,memory,70010887,phmap::flat_hash_map,2287403008 -random,time,80000000,phmap::flat_hash_map,11.589000 -random,memory,80002681,phmap::flat_hash_map,2287403008 -random,time,90000000,phmap::flat_hash_map,13.020000 -random,memory,90012468,phmap::flat_hash_map,2287403008 -random,time,100000000,phmap::flat_hash_map,14.407000 -random,memory,0,phmap::parallel_flat_hash_map,6688768 -random,memory,228584,phmap::parallel_flat_hash_map,6688768 -random,memory,228585,phmap::parallel_flat_hash_map,8372224 -random,memory,230683,phmap::parallel_flat_hash_map,8372224 -random,memory,230684,phmap::parallel_flat_hash_map,11190272 
-random,memory,257042,phmap::parallel_flat_hash_map,11190272 -random,memory,257105,phmap::parallel_flat_hash_map,11751424 -random,memory,456008,phmap::parallel_flat_hash_map,11751424 -random,memory,456009,phmap::parallel_flat_hash_map,13996032 -random,memory,456819,phmap::parallel_flat_hash_map,13582336 -random,memory,458740,phmap::parallel_flat_hash_map,13582336 -random,memory,458741,phmap::parallel_flat_hash_map,15847424 -random,memory,459266,phmap::parallel_flat_hash_map,15847424 -random,memory,459267,phmap::parallel_flat_hash_map,17518592 -random,memory,465267,phmap::parallel_flat_hash_map,17518592 -random,memory,465268,phmap::parallel_flat_hash_map,21155840 -random,memory,494937,phmap::parallel_flat_hash_map,20598784 -random,memory,911236,phmap::parallel_flat_hash_map,20598784 -random,memory,911237,phmap::parallel_flat_hash_map,22835200 -random,memory,913145,phmap::parallel_flat_hash_map,22835200 -random,memory,913146,phmap::parallel_flat_hash_map,25067520 -random,memory,914338,phmap::parallel_flat_hash_map,25067520 -random,memory,914339,phmap::parallel_flat_hash_map,27303936 -random,memory,916480,phmap::parallel_flat_hash_map,27303936 -random,memory,916481,phmap::parallel_flat_hash_map,29536256 -random,memory,916523,phmap::parallel_flat_hash_map,29536256 -random,memory,916524,phmap::parallel_flat_hash_map,30650368 -random,memory,917790,phmap::parallel_flat_hash_map,30650368 -random,memory,917791,phmap::parallel_flat_hash_map,32882688 -random,memory,918474,phmap::parallel_flat_hash_map,32882688 -random,memory,918475,phmap::parallel_flat_hash_map,35115008 -random,memory,924420,phmap::parallel_flat_hash_map,35115008 -random,memory,924421,phmap::parallel_flat_hash_map,37355520 -random,memory,937279,phmap::parallel_flat_hash_map,37355520 -random,memory,937319,phmap::parallel_flat_hash_map,38465536 -random,memory,1826595,phmap::parallel_flat_hash_map,38465536 -random,memory,1826596,phmap::parallel_flat_hash_map,42934272 -random,memory,1827102,phmap::parallel_flat_hash_map,42934272 -random,memory,1827103,phmap::parallel_flat_hash_map,45170688 -random,memory,1828265,phmap::parallel_flat_hash_map,45170688 -random,memory,1828266,phmap::parallel_flat_hash_map,47398912 -random,memory,1831166,phmap::parallel_flat_hash_map,47398912 -random,memory,1831167,phmap::parallel_flat_hash_map,49631232 -random,memory,1831515,phmap::parallel_flat_hash_map,49631232 -random,memory,1831516,phmap::parallel_flat_hash_map,51867648 -random,memory,1833264,phmap::parallel_flat_hash_map,51867648 -random,memory,1833265,phmap::parallel_flat_hash_map,54099968 -random,memory,1833345,phmap::parallel_flat_hash_map,54099968 -random,memory,1833346,phmap::parallel_flat_hash_map,56332288 -random,memory,1835078,phmap::parallel_flat_hash_map,56332288 -random,memory,1835079,phmap::parallel_flat_hash_map,58572800 -random,memory,1836213,phmap::parallel_flat_hash_map,58572800 -random,memory,1836214,phmap::parallel_flat_hash_map,60801024 -random,memory,1836364,phmap::parallel_flat_hash_map,60801024 -random,memory,1836365,phmap::parallel_flat_hash_map,63033344 -random,memory,1836849,phmap::parallel_flat_hash_map,63033344 -random,memory,1836850,phmap::parallel_flat_hash_map,65265664 -random,memory,1838065,phmap::parallel_flat_hash_map,65265664 -random,memory,1838066,phmap::parallel_flat_hash_map,67502080 -random,memory,1839241,phmap::parallel_flat_hash_map,67502080 -random,memory,1839242,phmap::parallel_flat_hash_map,69734400 -random,memory,1839771,phmap::parallel_flat_hash_map,69734400 
-random,memory,1839772,phmap::parallel_flat_hash_map,71962624 -random,memory,1844031,phmap::parallel_flat_hash_map,71962624 -random,memory,1844032,phmap::parallel_flat_hash_map,74194944 -random,memory,1844165,phmap::parallel_flat_hash_map,74194944 -random,memory,1844166,phmap::parallel_flat_hash_map,76423168 -random,memory,1849529,phmap::parallel_flat_hash_map,74186752 -random,memory,3656347,phmap::parallel_flat_hash_map,74186752 -random,memory,3656348,phmap::parallel_flat_hash_map,83120128 -random,memory,3658236,phmap::parallel_flat_hash_map,83120128 -random,memory,3658237,phmap::parallel_flat_hash_map,87592960 -random,memory,3660832,phmap::parallel_flat_hash_map,87592960 -random,memory,3660833,phmap::parallel_flat_hash_map,92061696 -random,memory,3663897,phmap::parallel_flat_hash_map,92061696 -random,memory,3663898,phmap::parallel_flat_hash_map,96526336 -random,memory,3668004,phmap::parallel_flat_hash_map,96526336 -random,memory,3668005,phmap::parallel_flat_hash_map,100990976 -random,memory,3668781,phmap::parallel_flat_hash_map,100990976 -random,memory,3668782,phmap::parallel_flat_hash_map,105459712 -random,memory,3669316,phmap::parallel_flat_hash_map,100990976 -random,memory,3669736,phmap::parallel_flat_hash_map,100990976 -random,memory,3669737,phmap::parallel_flat_hash_map,109928448 -random,memory,3670780,phmap::parallel_flat_hash_map,109928448 -random,memory,3670781,phmap::parallel_flat_hash_map,114388992 -random,memory,3671735,phmap::parallel_flat_hash_map,114388992 -random,memory,3671736,phmap::parallel_flat_hash_map,118853632 -random,memory,3672742,phmap::parallel_flat_hash_map,118853632 -random,memory,3672743,phmap::parallel_flat_hash_map,123322368 -random,memory,3672841,phmap::parallel_flat_hash_map,123322368 -random,memory,3672842,phmap::parallel_flat_hash_map,127787008 -random,memory,3674661,phmap::parallel_flat_hash_map,127787008 -random,memory,3674662,phmap::parallel_flat_hash_map,132259840 -random,memory,3675918,phmap::parallel_flat_hash_map,132259840 -random,memory,3675919,phmap::parallel_flat_hash_map,136724480 -random,memory,3676502,phmap::parallel_flat_hash_map,136724480 -random,memory,3676503,phmap::parallel_flat_hash_map,141180928 -random,memory,3676556,phmap::parallel_flat_hash_map,141180928 -random,memory,3676557,phmap::parallel_flat_hash_map,145645568 -random,memory,3679943,phmap::parallel_flat_hash_map,141176832 -random,memory,3682847,phmap::parallel_flat_hash_map,141176832 -random,memory,3682848,phmap::parallel_flat_hash_map,150106112 -random,memory,3701420,phmap::parallel_flat_hash_map,145637376 -random,memory,7323550,phmap::parallel_flat_hash_map,145637376 -random,memory,7323551,phmap::parallel_flat_hash_map,163500032 -random,memory,7325955,phmap::parallel_flat_hash_map,154566656 -random,memory,7326781,phmap::parallel_flat_hash_map,154566656 -random,memory,7326782,phmap::parallel_flat_hash_map,172433408 -random,memory,7327471,phmap::parallel_flat_hash_map,172433408 -random,memory,7327472,phmap::parallel_flat_hash_map,181362688 -random,memory,7328548,phmap::parallel_flat_hash_map,181362688 -random,memory,7328549,phmap::parallel_flat_hash_map,190300160 -random,memory,7331571,phmap::parallel_flat_hash_map,190300160 -random,memory,7331572,phmap::parallel_flat_hash_map,199229440 -random,memory,7333270,phmap::parallel_flat_hash_map,199229440 -random,memory,7333271,phmap::parallel_flat_hash_map,208154624 -random,memory,7336330,phmap::parallel_flat_hash_map,208154624 -random,memory,7336331,phmap::parallel_flat_hash_map,217083904 
-random,memory,7338941,phmap::parallel_flat_hash_map,217083904 -random,memory,7338942,phmap::parallel_flat_hash_map,226021376 -random,memory,7339987,phmap::parallel_flat_hash_map,226021376 -random,memory,7339988,phmap::parallel_flat_hash_map,234950656 -random,memory,7340192,phmap::parallel_flat_hash_map,234950656 -random,memory,7340193,phmap::parallel_flat_hash_map,243879936 -random,memory,7340212,phmap::parallel_flat_hash_map,243879936 -random,memory,7340213,phmap::parallel_flat_hash_map,252805120 -random,memory,7340756,phmap::parallel_flat_hash_map,252805120 -random,memory,7340757,phmap::parallel_flat_hash_map,261734400 -random,memory,7353138,phmap::parallel_flat_hash_map,261734400 -random,memory,7353139,phmap::parallel_flat_hash_map,270659584 -random,memory,7355638,phmap::parallel_flat_hash_map,270659584 -random,memory,7355639,phmap::parallel_flat_hash_map,279592960 -random,memory,7358552,phmap::parallel_flat_hash_map,279592960 -random,memory,7358553,phmap::parallel_flat_hash_map,288522240 -random,memory,7363002,phmap::parallel_flat_hash_map,279584768 -random,memory,7364175,phmap::parallel_flat_hash_map,279584768 -random,memory,7364176,phmap::parallel_flat_hash_map,297451520 -random,memory,7379232,phmap::parallel_flat_hash_map,288518144 -random,time,10000000,phmap::parallel_flat_hash_map,1.115000 -random,memory,10001837,phmap::parallel_flat_hash_map,288518144 -random,memory,14641981,phmap::parallel_flat_hash_map,288518144 -random,memory,14641982,phmap::parallel_flat_hash_map,324243456 -random,memory,14649536,phmap::parallel_flat_hash_map,306380800 -random,memory,14658094,phmap::parallel_flat_hash_map,306380800 -random,memory,14658095,phmap::parallel_flat_hash_map,342106112 -random,memory,14663598,phmap::parallel_flat_hash_map,342106112 -random,memory,14663599,phmap::parallel_flat_hash_map,359968768 -random,memory,14667323,phmap::parallel_flat_hash_map,359968768 -random,memory,14667324,phmap::parallel_flat_hash_map,377835520 -random,memory,14668425,phmap::parallel_flat_hash_map,377835520 -random,memory,14668426,phmap::parallel_flat_hash_map,395694080 -random,memory,14669145,phmap::parallel_flat_hash_map,395694080 -random,memory,14669146,phmap::parallel_flat_hash_map,413560832 -random,memory,14669916,phmap::parallel_flat_hash_map,395698176 -random,memory,14673506,phmap::parallel_flat_hash_map,395698176 -random,memory,14673507,phmap::parallel_flat_hash_map,431423488 -random,memory,14676201,phmap::parallel_flat_hash_map,431423488 -random,memory,14676202,phmap::parallel_flat_hash_map,449286144 -random,memory,14681323,phmap::parallel_flat_hash_map,449286144 -random,memory,14681324,phmap::parallel_flat_hash_map,467152896 -random,memory,14684771,phmap::parallel_flat_hash_map,449286144 -random,memory,14686498,phmap::parallel_flat_hash_map,449286144 -random,memory,14686499,phmap::parallel_flat_hash_map,485011456 -random,memory,14691299,phmap::parallel_flat_hash_map,485011456 -random,memory,14691300,phmap::parallel_flat_hash_map,502865920 -random,memory,14694531,phmap::parallel_flat_hash_map,502865920 -random,memory,14694532,phmap::parallel_flat_hash_map,520728576 -random,memory,14696324,phmap::parallel_flat_hash_map,520728576 -random,memory,14696325,phmap::parallel_flat_hash_map,538587136 -random,memory,14701255,phmap::parallel_flat_hash_map,538587136 -random,memory,14701256,phmap::parallel_flat_hash_map,556445696 -random,memory,14702651,phmap::parallel_flat_hash_map,538578944 -random,memory,14704639,phmap::parallel_flat_hash_map,538578944 
-random,memory,14704640,phmap::parallel_flat_hash_map,574300160 -random,memory,14707748,phmap::parallel_flat_hash_map,574300160 -random,memory,14707749,phmap::parallel_flat_hash_map,592158720 -random,memory,14711651,phmap::parallel_flat_hash_map,574296064 -random,time,20000000,phmap::parallel_flat_hash_map,2.484000 -random,memory,20006564,phmap::parallel_flat_hash_map,574296064 -random,memory,29321443,phmap::parallel_flat_hash_map,574296064 -random,memory,29321444,phmap::parallel_flat_hash_map,645742592 -random,memory,29322576,phmap::parallel_flat_hash_map,645742592 -random,memory,29322577,phmap::parallel_flat_hash_map,681463808 -random,memory,29331685,phmap::parallel_flat_hash_map,645742592 -random,memory,29333141,phmap::parallel_flat_hash_map,645742592 -random,memory,29333142,phmap::parallel_flat_hash_map,717189120 -random,memory,29336447,phmap::parallel_flat_hash_map,717189120 -random,memory,29336448,phmap::parallel_flat_hash_map,752910336 -random,memory,29345055,phmap::parallel_flat_hash_map,717185024 -random,memory,29352413,phmap::parallel_flat_hash_map,717185024 -random,memory,29352414,phmap::parallel_flat_hash_map,788627456 -random,memory,29357051,phmap::parallel_flat_hash_map,752902144 -random,memory,29357141,phmap::parallel_flat_hash_map,752902144 -random,memory,29357142,phmap::parallel_flat_hash_map,824348672 -random,memory,29359481,phmap::parallel_flat_hash_map,824348672 -random,memory,29359482,phmap::parallel_flat_hash_map,860069888 -random,memory,29362077,phmap::parallel_flat_hash_map,860069888 -random,memory,29362078,phmap::parallel_flat_hash_map,895795200 -random,memory,29366120,phmap::parallel_flat_hash_map,895795200 -random,memory,29366121,phmap::parallel_flat_hash_map,931512320 -random,memory,29368098,phmap::parallel_flat_hash_map,931512320 -random,memory,29368099,phmap::parallel_flat_hash_map,967229440 -random,memory,29368978,phmap::parallel_flat_hash_map,931508224 -random,memory,29373953,phmap::parallel_flat_hash_map,931508224 -random,memory,29373954,phmap::parallel_flat_hash_map,1002954752 -random,memory,29376083,phmap::parallel_flat_hash_map,1002954752 -random,memory,29376084,phmap::parallel_flat_hash_map,1038680064 -random,memory,29378406,phmap::parallel_flat_hash_map,1002950656 -random,memory,29378970,phmap::parallel_flat_hash_map,1002950656 -random,memory,29378971,phmap::parallel_flat_hash_map,1074397184 -random,memory,29380083,phmap::parallel_flat_hash_map,1074397184 -random,memory,29380084,phmap::parallel_flat_hash_map,1110122496 -random,memory,29381250,phmap::parallel_flat_hash_map,1110122496 -random,memory,29381251,phmap::parallel_flat_hash_map,1145839616 -random,memory,29384576,phmap::parallel_flat_hash_map,1110110208 -random,memory,29391514,phmap::parallel_flat_hash_map,1110110208 -random,memory,29391515,phmap::parallel_flat_hash_map,1181556736 -random,memory,29398827,phmap::parallel_flat_hash_map,1145835520 -random,time,30000000,phmap::parallel_flat_hash_map,4.197000 -random,memory,30003726,phmap::parallel_flat_hash_map,1145835520 -random,time,40000000,phmap::parallel_flat_hash_map,5.407000 -random,memory,40004260,phmap::parallel_flat_hash_map,1145835520 -random,time,50000000,phmap::parallel_flat_hash_map,6.651000 -random,memory,50008463,phmap::parallel_flat_hash_map,1145835520 -random,memory,58650774,phmap::parallel_flat_hash_map,1145835520 -random,memory,58650775,phmap::parallel_flat_hash_map,1288724480 -random,memory,58651341,phmap::parallel_flat_hash_map,1217282048 -random,memory,58670748,phmap::parallel_flat_hash_map,1217282048 
-random,memory,58670749,phmap::parallel_flat_hash_map,1360171008 -random,memory,58672543,phmap::parallel_flat_hash_map,1288724480 -random,memory,58679343,phmap::parallel_flat_hash_map,1288724480 -random,memory,58679344,phmap::parallel_flat_hash_map,1431609344 -random,memory,58679621,phmap::parallel_flat_hash_map,1431609344 -random,memory,58679622,phmap::parallel_flat_hash_map,1503047680 -random,memory,58681449,phmap::parallel_flat_hash_map,1503047680 -random,memory,58681450,phmap::parallel_flat_hash_map,1574498304 -random,memory,58687442,phmap::parallel_flat_hash_map,1503055872 -random,memory,58716081,phmap::parallel_flat_hash_map,1503055872 -random,memory,58716082,phmap::parallel_flat_hash_map,1645944832 -random,memory,58719988,phmap::parallel_flat_hash_map,1574498304 -random,memory,58720496,phmap::parallel_flat_hash_map,1574498304 -random,memory,58720497,phmap::parallel_flat_hash_map,1717387264 -random,memory,58723911,phmap::parallel_flat_hash_map,1717387264 -random,memory,58723912,phmap::parallel_flat_hash_map,1788841984 -random,memory,58728095,phmap::parallel_flat_hash_map,1717395456 -random,memory,58731608,phmap::parallel_flat_hash_map,1717395456 -random,memory,58731609,phmap::parallel_flat_hash_map,1860280320 -random,memory,58737338,phmap::parallel_flat_hash_map,1860280320 -random,memory,58737339,phmap::parallel_flat_hash_map,1931718656 -random,memory,58737397,phmap::parallel_flat_hash_map,1860272128 -random,memory,58744726,phmap::parallel_flat_hash_map,1860272128 -random,memory,58744727,phmap::parallel_flat_hash_map,2003165184 -random,memory,58746205,phmap::parallel_flat_hash_map,1931718656 -random,memory,58752523,phmap::parallel_flat_hash_map,1931718656 -random,memory,58752524,phmap::parallel_flat_hash_map,2074607616 -random,memory,58753838,phmap::parallel_flat_hash_map,2074607616 -random,memory,58753839,phmap::parallel_flat_hash_map,2146054144 -random,memory,58756083,phmap::parallel_flat_hash_map,2146054144 -random,memory,58756084,phmap::parallel_flat_hash_map,2217496576 -random,memory,58758266,phmap::parallel_flat_hash_map,2217496576 -random,memory,58758267,phmap::parallel_flat_hash_map,2288943104 -random,memory,58760494,phmap::parallel_flat_hash_map,2217492480 -random,memory,58766191,phmap::parallel_flat_hash_map,2217492480 -random,memory,58766192,phmap::parallel_flat_hash_map,2360381440 -random,memory,58773720,phmap::parallel_flat_hash_map,2288934912 -random,time,60000000,phmap::parallel_flat_hash_map,9.067000 -random,memory,60002586,phmap::parallel_flat_hash_map,2288934912 -random,time,70000000,phmap::parallel_flat_hash_map,10.409000 -random,memory,70011900,phmap::parallel_flat_hash_map,2288934912 -random,time,80000000,phmap::parallel_flat_hash_map,11.747000 -random,memory,80002981,phmap::parallel_flat_hash_map,2288934912 -random,time,90000000,phmap::parallel_flat_hash_map,13.091000 -random,memory,90007030,phmap::parallel_flat_hash_map,2288934912 -random,time,100000000,phmap::parallel_flat_hash_map,14.467000 diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/results/output_flat_par_mutex_4 b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/results/output_flat_par_mutex_4 deleted file mode 100644 index c22410f..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/results/output_flat_par_mutex_4 +++ /dev/null @@ -1,316 +0,0 @@ -random,memory,0,phmap::flat_hash_map,15753216 -random,memory,299298,phmap::flat_hash_map,15753216 -random,memory,299585,phmap::flat_hash_map,11284480 
-random,memory,458751,phmap::flat_hash_map,11284480 -random,memory,458752,phmap::flat_hash_map,29151232 -random,memory,505180,phmap::flat_hash_map,29151232 -random,memory,505419,phmap::flat_hash_map,20217856 -random,memory,599314,phmap::flat_hash_map,20217856 -random,memory,599573,phmap::flat_hash_map,19095552 -random,memory,917503,phmap::flat_hash_map,19095552 -random,memory,917504,phmap::flat_hash_map,54820864 -random,memory,963390,phmap::flat_hash_map,54820864 -random,memory,963601,phmap::flat_hash_map,36954112 -random,memory,1835007,phmap::flat_hash_map,36954112 -random,memory,1835008,phmap::flat_hash_map,108400640 -random,memory,1860352,phmap::flat_hash_map,108400640 -random,memory,1860468,phmap::flat_hash_map,72675328 -random,memory,3670015,phmap::flat_hash_map,72675328 -random,memory,3670016,phmap::flat_hash_map,215564288 -random,memory,3686835,phmap::flat_hash_map,215564288 -random,memory,3686879,phmap::flat_hash_map,144117760 -random,memory,7340031,phmap::flat_hash_map,144117760 -random,memory,7340032,phmap::flat_hash_map,429891584 -random,memory,7352288,phmap::flat_hash_map,429891584 -random,memory,7352289,phmap::flat_hash_map,287002624 -random,time,10000000,phmap::flat_hash_map,0.923000 -random,memory,10003658,phmap::flat_hash_map,287002624 -random,memory,14680063,phmap::flat_hash_map,287002624 -random,memory,14680064,phmap::flat_hash_map,858554368 -random,memory,14690134,phmap::flat_hash_map,858554368 -random,memory,14690179,phmap::flat_hash_map,572776448 -random,time,20000000,phmap::flat_hash_map,2.224000 -random,memory,20009269,phmap::flat_hash_map,572776448 -random,memory,29360127,phmap::flat_hash_map,572776448 -random,memory,29360128,phmap::flat_hash_map,1715867648 -random,memory,29366940,phmap::flat_hash_map,1715867648 -random,memory,29366994,phmap::flat_hash_map,1144315904 -random,time,30000000,phmap::flat_hash_map,3.846000 -random,memory,30008884,phmap::flat_hash_map,1144315904 -random,time,40000000,phmap::flat_hash_map,5.131000 -random,memory,40002137,phmap::flat_hash_map,1144315904 -random,time,50000000,phmap::flat_hash_map,6.445000 -random,memory,50005385,phmap::flat_hash_map,1144315904 -random,memory,58720255,phmap::flat_hash_map,1144315904 -random,memory,58720256,phmap::flat_hash_map,3430494208 -random,memory,58729667,phmap::flat_hash_map,3430494208 -random,memory,58729713,phmap::flat_hash_map,2287415296 -random,time,60000000,phmap::flat_hash_map,8.827000 -random,memory,60010828,phmap::flat_hash_map,2287415296 -random,time,70000000,phmap::flat_hash_map,10.317000 -random,memory,70006827,phmap::flat_hash_map,2287415296 -random,time,80000000,phmap::flat_hash_map,11.803000 -random,memory,80002090,phmap::flat_hash_map,2287415296 -random,time,90000000,phmap::flat_hash_map,13.277000 -random,memory,90005422,phmap::flat_hash_map,2287415296 -random,time,100000000,phmap::flat_hash_map,14.716000 -random,memory,0,phmap::parallel_flat_hash_map_mt,7884800 -random,memory,229053,phmap::parallel_flat_hash_map_mt,7884800 -random,memory,229054,phmap::parallel_flat_hash_map_mt,12562432 -random,memory,244827,phmap::parallel_flat_hash_map_mt,12562432 -random,memory,244899,phmap::parallel_flat_hash_map_mt,13127680 -random,memory,455707,phmap::parallel_flat_hash_map_mt,13127680 -random,memory,455708,phmap::parallel_flat_hash_map_mt,14254080 -random,memory,457702,phmap::parallel_flat_hash_map_mt,14254080 -random,memory,457703,phmap::parallel_flat_hash_map_mt,14700544 -random,memory,458422,phmap::parallel_flat_hash_map_mt,14700544 
-random,memory,458423,phmap::parallel_flat_hash_map_mt,16953344 -random,memory,459497,phmap::parallel_flat_hash_map_mt,16953344 -random,memory,459498,phmap::parallel_flat_hash_map_mt,18644992 -random,memory,460681,phmap::parallel_flat_hash_map_mt,18644992 -random,memory,460682,phmap::parallel_flat_hash_map_mt,21450752 -random,memory,482289,phmap::parallel_flat_hash_map_mt,21450752 -random,memory,482359,phmap::parallel_flat_hash_map_mt,22495232 -random,memory,914327,phmap::parallel_flat_hash_map_mt,22495232 -random,memory,914328,phmap::parallel_flat_hash_map_mt,25849856 -random,memory,914709,phmap::parallel_flat_hash_map_mt,25849856 -random,memory,914710,phmap::parallel_flat_hash_map_mt,29200384 -random,memory,915833,phmap::parallel_flat_hash_map_mt,29200384 -random,memory,915834,phmap::parallel_flat_hash_map_mt,33673216 -random,memory,917742,phmap::parallel_flat_hash_map_mt,33673216 -random,memory,917743,phmap::parallel_flat_hash_map_mt,35905536 -random,memory,920586,phmap::parallel_flat_hash_map_mt,35905536 -random,memory,920587,phmap::parallel_flat_hash_map_mt,37023744 -random,memory,921976,phmap::parallel_flat_hash_map_mt,37023744 -random,memory,921977,phmap::parallel_flat_hash_map_mt,40370176 -random,memory,922650,phmap::parallel_flat_hash_map_mt,41484288 -random,memory,983562,phmap::parallel_flat_hash_map_mt,41484288 -random,memory,1172876,phmap::parallel_flat_hash_map_mt,40366080 -random,memory,1825547,phmap::parallel_flat_hash_map_mt,40366080 -random,memory,1825548,phmap::parallel_flat_hash_map_mt,44834816 -random,memory,1826741,phmap::parallel_flat_hash_map_mt,44834816 -random,memory,1826816,phmap::parallel_flat_hash_map_mt,42598400 -random,memory,1828151,phmap::parallel_flat_hash_map_mt,42598400 -random,memory,1828152,phmap::parallel_flat_hash_map_mt,47067136 -random,memory,1830082,phmap::parallel_flat_hash_map_mt,47067136 -random,memory,1830083,phmap::parallel_flat_hash_map_mt,49295360 -random,memory,1831123,phmap::parallel_flat_hash_map_mt,49295360 -random,memory,1831124,phmap::parallel_flat_hash_map_mt,51531776 -random,memory,1832184,phmap::parallel_flat_hash_map_mt,51531776 -random,memory,1832185,phmap::parallel_flat_hash_map_mt,53760000 -random,memory,1832769,phmap::parallel_flat_hash_map_mt,53760000 -random,memory,1832770,phmap::parallel_flat_hash_map_mt,55996416 -random,memory,1832874,phmap::parallel_flat_hash_map_mt,55996416 -random,memory,1832875,phmap::parallel_flat_hash_map_mt,58228736 -random,memory,1835220,phmap::parallel_flat_hash_map_mt,58228736 -random,memory,1835221,phmap::parallel_flat_hash_map_mt,60465152 -random,memory,1836418,phmap::parallel_flat_hash_map_mt,60465152 -random,memory,1836419,phmap::parallel_flat_hash_map_mt,62701568 -random,memory,1836633,phmap::parallel_flat_hash_map_mt,62701568 -random,memory,1836634,phmap::parallel_flat_hash_map_mt,64929792 -random,memory,1837724,phmap::parallel_flat_hash_map_mt,64929792 -random,memory,1837725,phmap::parallel_flat_hash_map_mt,67158016 -random,memory,1838209,phmap::parallel_flat_hash_map_mt,67158016 -random,memory,1838210,phmap::parallel_flat_hash_map_mt,69394432 -random,memory,1838995,phmap::parallel_flat_hash_map_mt,69394432 -random,memory,1838996,phmap::parallel_flat_hash_map_mt,71622656 -random,memory,1840918,phmap::parallel_flat_hash_map_mt,71622656 -random,memory,1840919,phmap::parallel_flat_hash_map_mt,73859072 -random,memory,1841584,phmap::parallel_flat_hash_map_mt,73859072 -random,memory,1841585,phmap::parallel_flat_hash_map_mt,76095488 -random,memory,1842548,phmap::parallel_flat_hash_map_mt,78323712 
-random,memory,1854401,phmap::parallel_flat_hash_map_mt,78323712 -random,memory,2039888,phmap::parallel_flat_hash_map_mt,76087296 -random,memory,3656563,phmap::parallel_flat_hash_map_mt,76091392 -random,memory,3656564,phmap::parallel_flat_hash_map_mt,85028864 -random,memory,3657600,phmap::parallel_flat_hash_map_mt,85028864 -random,memory,3657601,phmap::parallel_flat_hash_map_mt,89497600 -random,memory,3659709,phmap::parallel_flat_hash_map_mt,89497600 -random,memory,3659710,phmap::parallel_flat_hash_map_mt,93958144 -random,memory,3664287,phmap::parallel_flat_hash_map_mt,93958144 -random,memory,3664288,phmap::parallel_flat_hash_map_mt,98418688 -random,memory,3665897,phmap::parallel_flat_hash_map_mt,98418688 -random,memory,3665898,phmap::parallel_flat_hash_map_mt,102879232 -random,memory,3668708,phmap::parallel_flat_hash_map_mt,102879232 -random,memory,3668709,phmap::parallel_flat_hash_map_mt,107347968 -random,memory,3669267,phmap::parallel_flat_hash_map_mt,107347968 -random,memory,3669268,phmap::parallel_flat_hash_map_mt,111816704 -random,memory,3673233,phmap::parallel_flat_hash_map_mt,111816704 -random,memory,3673234,phmap::parallel_flat_hash_map_mt,134148096 -random,memory,3673249,phmap::parallel_flat_hash_map_mt,134148096 -random,memory,3673261,phmap::parallel_flat_hash_map_mt,125210624 -random,memory,3673702,phmap::parallel_flat_hash_map_mt,125210624 -random,memory,3673703,phmap::parallel_flat_hash_map_mt,129679360 -random,memory,3674703,phmap::parallel_flat_hash_map_mt,129679360 -random,memory,3674704,phmap::parallel_flat_hash_map_mt,134139904 -random,memory,3675513,phmap::parallel_flat_hash_map_mt,134139904 -random,memory,3675514,phmap::parallel_flat_hash_map_mt,138600448 -random,memory,3677599,phmap::parallel_flat_hash_map_mt,138600448 -random,memory,3677600,phmap::parallel_flat_hash_map_mt,143073280 -random,memory,3678958,phmap::parallel_flat_hash_map_mt,143073280 -random,memory,3678959,phmap::parallel_flat_hash_map_mt,147537920 -random,memory,3679455,phmap::parallel_flat_hash_map_mt,147537920 -random,memory,3679456,phmap::parallel_flat_hash_map_mt,152006656 -random,memory,3829988,phmap::parallel_flat_hash_map_mt,152006656 -random,memory,3830019,phmap::parallel_flat_hash_map_mt,147537920 -random,memory,7327631,phmap::parallel_flat_hash_map_mt,147537920 -random,memory,7327632,phmap::parallel_flat_hash_map_mt,183267328 -random,memory,7327726,phmap::parallel_flat_hash_map_mt,183267328 -random,memory,7327727,phmap::parallel_flat_hash_map_mt,174333952 -random,memory,7328167,phmap::parallel_flat_hash_map_mt,174333952 -random,memory,7328168,phmap::parallel_flat_hash_map_mt,183263232 -random,memory,7332754,phmap::parallel_flat_hash_map_mt,183263232 -random,memory,7332755,phmap::parallel_flat_hash_map_mt,192196608 -random,memory,7334286,phmap::parallel_flat_hash_map_mt,192196608 -random,memory,7334287,phmap::parallel_flat_hash_map_mt,201121792 -random,memory,7336335,phmap::parallel_flat_hash_map_mt,201121792 -random,memory,7336336,phmap::parallel_flat_hash_map_mt,210059264 -random,memory,7337725,phmap::parallel_flat_hash_map_mt,210059264 -random,memory,7337726,phmap::parallel_flat_hash_map_mt,218984448 -random,memory,7339260,phmap::parallel_flat_hash_map_mt,218984448 -random,memory,7339261,phmap::parallel_flat_hash_map_mt,227909632 -random,memory,7340643,phmap::parallel_flat_hash_map_mt,227909632 -random,memory,7340644,phmap::parallel_flat_hash_map_mt,236838912 -random,memory,7342329,phmap::parallel_flat_hash_map_mt,236838912 -random,memory,7342330,phmap::parallel_flat_hash_map_mt,245776384 
-random,memory,7343707,phmap::parallel_flat_hash_map_mt,245776384 -random,memory,7343708,phmap::parallel_flat_hash_map_mt,254709760 -random,memory,7344092,phmap::parallel_flat_hash_map_mt,254709760 -random,memory,7344093,phmap::parallel_flat_hash_map_mt,263639040 -random,memory,7344454,phmap::parallel_flat_hash_map_mt,263639040 -random,memory,7344529,phmap::parallel_flat_hash_map_mt,254705664 -random,memory,7349960,phmap::parallel_flat_hash_map_mt,254705664 -random,memory,7349961,phmap::parallel_flat_hash_map_mt,272572416 -random,memory,7350165,phmap::parallel_flat_hash_map_mt,272572416 -random,memory,7350166,phmap::parallel_flat_hash_map_mt,281497600 -random,memory,7353807,phmap::parallel_flat_hash_map_mt,281497600 -random,memory,7353808,phmap::parallel_flat_hash_map_mt,290430976 -random,memory,7354809,phmap::parallel_flat_hash_map_mt,290430976 -random,memory,7354810,phmap::parallel_flat_hash_map_mt,299356160 -random,memory,7359696,phmap::parallel_flat_hash_map_mt,299356160 -random,memory,7359779,phmap::parallel_flat_hash_map_mt,290422784 -random,time,10000000,phmap::parallel_flat_hash_map_mt,0.788000 -random,memory,10043554,phmap::parallel_flat_hash_map_mt,288821248 -random,memory,14660773,phmap::parallel_flat_hash_map_mt,288821248 -random,memory,14660774,phmap::parallel_flat_hash_map_mt,324546560 -random,memory,14668394,phmap::parallel_flat_hash_map_mt,324546560 -random,memory,14668395,phmap::parallel_flat_hash_map_mt,342409216 -random,memory,14670081,phmap::parallel_flat_hash_map_mt,342409216 -random,memory,14670082,phmap::parallel_flat_hash_map_mt,360271872 -random,memory,14670721,phmap::parallel_flat_hash_map_mt,360271872 -random,memory,14670722,phmap::parallel_flat_hash_map_mt,378126336 -random,memory,14673268,phmap::parallel_flat_hash_map_mt,378126336 -random,memory,14673269,phmap::parallel_flat_hash_map_mt,395984896 -random,memory,14675927,phmap::parallel_flat_hash_map_mt,395984896 -random,memory,14675929,phmap::parallel_flat_hash_map_mt,413843456 -random,memory,14677274,phmap::parallel_flat_hash_map_mt,413843456 -random,memory,14677275,phmap::parallel_flat_hash_map_mt,431706112 -random,memory,14679690,phmap::parallel_flat_hash_map_mt,431706112 -random,memory,14679691,phmap::parallel_flat_hash_map_mt,449564672 -random,memory,14680028,phmap::parallel_flat_hash_map_mt,449564672 -random,memory,14680031,phmap::parallel_flat_hash_map_mt,431702016 -random,memory,14682548,phmap::parallel_flat_hash_map_mt,431702016 -random,memory,14682549,phmap::parallel_flat_hash_map_mt,467423232 -random,memory,14683242,phmap::parallel_flat_hash_map_mt,467423232 -random,memory,14683243,phmap::parallel_flat_hash_map_mt,485289984 -random,memory,14684510,phmap::parallel_flat_hash_map_mt,485289984 -random,memory,14684511,phmap::parallel_flat_hash_map_mt,503152640 -random,memory,14685762,phmap::parallel_flat_hash_map_mt,503152640 -random,memory,14685763,phmap::parallel_flat_hash_map_mt,521015296 -random,memory,14687183,phmap::parallel_flat_hash_map_mt,521015296 -random,memory,14687184,phmap::parallel_flat_hash_map_mt,538873856 -random,memory,14687856,phmap::parallel_flat_hash_map_mt,538873856 -random,memory,14687857,phmap::parallel_flat_hash_map_mt,556740608 -random,memory,14688774,phmap::parallel_flat_hash_map_mt,556740608 -random,memory,14688775,phmap::parallel_flat_hash_map_mt,574595072 -random,memory,14708553,phmap::parallel_flat_hash_map_mt,574595072 -random,memory,14708554,phmap::parallel_flat_hash_map_mt,592453632 -random,memory,14952078,phmap::parallel_flat_hash_map_mt,592453632 
-random,memory,14952197,phmap::parallel_flat_hash_map_mt,574582784 -random,time,20000000,phmap::parallel_flat_hash_map_mt,1.615000 -random,memory,20008974,phmap::parallel_flat_hash_map_mt,574586880 -random,memory,29333764,phmap::parallel_flat_hash_map_mt,574586880 -random,memory,29333765,phmap::parallel_flat_hash_map_mt,646033408 -random,memory,29334022,phmap::parallel_flat_hash_map_mt,646033408 -random,memory,29334023,phmap::parallel_flat_hash_map_mt,681766912 -random,memory,29342949,phmap::parallel_flat_hash_map_mt,681766912 -random,memory,29342950,phmap::parallel_flat_hash_map_mt,717488128 -random,memory,29345586,phmap::parallel_flat_hash_map_mt,717488128 -random,memory,29345587,phmap::parallel_flat_hash_map_mt,753205248 -random,memory,29347489,phmap::parallel_flat_hash_map_mt,753205248 -random,memory,29347490,phmap::parallel_flat_hash_map_mt,788930560 -random,memory,29348504,phmap::parallel_flat_hash_map_mt,788930560 -random,memory,29348542,phmap::parallel_flat_hash_map_mt,753209344 -random,memory,29349198,phmap::parallel_flat_hash_map_mt,753209344 -random,memory,29349199,phmap::parallel_flat_hash_map_mt,824655872 -random,memory,29352494,phmap::parallel_flat_hash_map_mt,824655872 -random,memory,29352495,phmap::parallel_flat_hash_map_mt,860377088 -random,memory,29358759,phmap::parallel_flat_hash_map_mt,860377088 -random,memory,29358760,phmap::parallel_flat_hash_map_mt,896094208 -random,memory,29361234,phmap::parallel_flat_hash_map_mt,896094208 -random,memory,29361235,phmap::parallel_flat_hash_map_mt,931811328 -random,memory,29362544,phmap::parallel_flat_hash_map_mt,931811328 -random,memory,29362545,phmap::parallel_flat_hash_map_mt,967536640 -random,memory,29369457,phmap::parallel_flat_hash_map_mt,967536640 -random,memory,29369458,phmap::parallel_flat_hash_map_mt,1003257856 -random,memory,29372921,phmap::parallel_flat_hash_map_mt,1003257856 -random,memory,29372922,phmap::parallel_flat_hash_map_mt,1038979072 -random,memory,29374936,phmap::parallel_flat_hash_map_mt,1038979072 -random,memory,29374937,phmap::parallel_flat_hash_map_mt,1074696192 -random,memory,29383512,phmap::parallel_flat_hash_map_mt,1074696192 -random,memory,29383513,phmap::parallel_flat_hash_map_mt,1110417408 -random,memory,29383512,phmap::parallel_flat_hash_map_mt,1110417408 -random,memory,29383513,phmap::parallel_flat_hash_map_mt,1181863936 -random,memory,29474438,phmap::parallel_flat_hash_map_mt,1181851648 -random,memory,29475679,phmap::parallel_flat_hash_map_mt,1146126336 -random,time,30000000,phmap::parallel_flat_hash_map_mt,2.718000 -random,memory,30039213,phmap::parallel_flat_hash_map_mt,1146114048 -random,time,40000000,phmap::parallel_flat_hash_map_mt,3.314000 -random,memory,40063406,phmap::parallel_flat_hash_map_mt,1146126336 -random,time,50000000,phmap::parallel_flat_hash_map_mt,3.930000 -random,memory,50000796,phmap::parallel_flat_hash_map_mt,1146003456 -random,memory,58662905,phmap::parallel_flat_hash_map_mt,1146118144 -random,memory,58662906,phmap::parallel_flat_hash_map_mt,1289007104 -random,memory,58690501,phmap::parallel_flat_hash_map_mt,1289007104 -random,memory,58690502,phmap::parallel_flat_hash_map_mt,1360453632 -random,memory,58698182,phmap::parallel_flat_hash_map_mt,1360453632 -random,memory,58698183,phmap::parallel_flat_hash_map_mt,1431904256 -random,memory,58704661,phmap::parallel_flat_hash_map_mt,1431904256 -random,memory,58704662,phmap::parallel_flat_hash_map_mt,1503346688 -random,memory,58708120,phmap::parallel_flat_hash_map_mt,1503346688 
-random,memory,58708121,phmap::parallel_flat_hash_map_mt,1574785024 -random,memory,58718704,phmap::parallel_flat_hash_map_mt,1574785024 -random,memory,58718705,phmap::parallel_flat_hash_map_mt,1646223360 -random,memory,58720588,phmap::parallel_flat_hash_map_mt,1646223360 -random,memory,58720589,phmap::parallel_flat_hash_map_mt,1860554752 -random,memory,58720613,phmap::parallel_flat_hash_map_mt,1860554752 -random,memory,58720614,phmap::parallel_flat_hash_map_mt,1789112320 -random,memory,58721807,phmap::parallel_flat_hash_map_mt,1789112320 -random,memory,58721808,phmap::parallel_flat_hash_map_mt,1860558848 -random,memory,58722210,phmap::parallel_flat_hash_map_mt,1860558848 -random,memory,58722854,phmap::parallel_flat_hash_map_mt,1789112320 -random,memory,58722853,phmap::parallel_flat_hash_map_mt,1789112320 -random,memory,58722854,phmap::parallel_flat_hash_map_mt,1931997184 -random,memory,58723270,phmap::parallel_flat_hash_map_mt,1931997184 -random,memory,58723271,phmap::parallel_flat_hash_map_mt,2003435520 -random,memory,58725728,phmap::parallel_flat_hash_map_mt,2003435520 -random,memory,58725729,phmap::parallel_flat_hash_map_mt,2074873856 -random,memory,58729127,phmap::parallel_flat_hash_map_mt,2074873856 -random,memory,58729128,phmap::parallel_flat_hash_map_mt,2146328576 -random,memory,58738897,phmap::parallel_flat_hash_map_mt,2146328576 -random,memory,58738898,phmap::parallel_flat_hash_map_mt,2217775104 -random,memory,58747088,phmap::parallel_flat_hash_map_mt,2217775104 -random,memory,58747089,phmap::parallel_flat_hash_map_mt,2289213440 -random,memory,58790495,phmap::parallel_flat_hash_map_mt,2289213440 -random,memory,58790496,phmap::parallel_flat_hash_map_mt,2360659968 -random,memory,58912378,phmap::parallel_flat_hash_map_mt,2360659968 -random,memory,58915332,phmap::parallel_flat_hash_map_mt,2289213440 -random,time,60000000,phmap::parallel_flat_hash_map_mt,5.665000 -random,memory,60121415,phmap::parallel_flat_hash_map_mt,2289217536 -random,time,70000000,phmap::parallel_flat_hash_map_mt,6.304000 -random,memory,70000000,phmap::parallel_flat_hash_map_mt,2289008640 -random,time,80000000,phmap::parallel_flat_hash_map_mt,6.942000 -random,memory,80007185,phmap::parallel_flat_hash_map_mt,2289209344 -random,time,90000000,phmap::parallel_flat_hash_map_mt,7.608000 -random,memory,90008872,phmap::parallel_flat_hash_map_mt,2289201152 -random,time,100000000,phmap::parallel_flat_hash_map_mt,8.286000 diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/results/output_flat_par_mutex_5 b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/results/output_flat_par_mutex_5 deleted file mode 100644 index adcfe3d..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/results/output_flat_par_mutex_5 +++ /dev/null @@ -1,315 +0,0 @@ -random,memory,0,absl::flat_hash_map,15745024 -random,memory,238685,absl::flat_hash_map,15745024 -random,memory,238809,absl::flat_hash_map,11276288 -random,memory,458751,absl::flat_hash_map,11276288 -random,memory,458752,absl::flat_hash_map,29143040 -random,memory,485817,absl::flat_hash_map,29143040 -random,memory,486085,absl::flat_hash_map,20209664 -random,memory,675678,absl::flat_hash_map,20209664 -random,memory,675893,absl::flat_hash_map,19091456 -random,memory,917503,absl::flat_hash_map,19091456 -random,memory,917504,absl::flat_hash_map,54816768 -random,memory,927664,absl::flat_hash_map,54816768 -random,memory,927716,absl::flat_hash_map,36950016 -random,memory,1835007,absl::flat_hash_map,36950016 
-random,memory,1835008,absl::flat_hash_map,108396544 -random,memory,1855763,absl::flat_hash_map,108396544 -random,memory,1855867,absl::flat_hash_map,72671232 -random,memory,3670015,absl::flat_hash_map,72671232 -random,memory,3670016,absl::flat_hash_map,215560192 -random,memory,3677233,absl::flat_hash_map,215560192 -random,memory,3677313,absl::flat_hash_map,144113664 -random,memory,7340031,absl::flat_hash_map,144113664 -random,memory,7340032,absl::flat_hash_map,429887488 -random,memory,7344858,absl::flat_hash_map,429887488 -random,memory,7344912,absl::flat_hash_map,286998528 -random,time,10000000,absl::flat_hash_map,0.935000 -random,memory,10007722,absl::flat_hash_map,286998528 -random,memory,14680063,absl::flat_hash_map,286998528 -random,memory,14680064,absl::flat_hash_map,858546176 -random,memory,14682782,absl::flat_hash_map,858546176 -random,memory,14682830,absl::flat_hash_map,572768256 -random,time,20000000,absl::flat_hash_map,2.244000 -random,memory,20010261,absl::flat_hash_map,572768256 -random,memory,29360127,absl::flat_hash_map,572768256 -random,memory,29360128,absl::flat_hash_map,1715859456 -random,memory,29363189,absl::flat_hash_map,1715859456 -random,memory,29363226,absl::flat_hash_map,1144311808 -random,time,30000000,absl::flat_hash_map,3.899000 -random,memory,30008430,absl::flat_hash_map,1144311808 -random,time,40000000,absl::flat_hash_map,5.165000 -random,memory,40003909,absl::flat_hash_map,1144311808 -random,time,50000000,absl::flat_hash_map,6.428000 -random,memory,50001112,absl::flat_hash_map,1144311808 -random,memory,58720255,absl::flat_hash_map,1144311808 -random,memory,58720256,absl::flat_hash_map,3430490112 -random,memory,58728488,absl::flat_hash_map,3430490112 -random,memory,58728529,absl::flat_hash_map,2287411200 -random,time,60000000,absl::flat_hash_map,8.813000 -random,memory,60004544,absl::flat_hash_map,2287411200 -random,time,70000000,absl::flat_hash_map,10.289000 -random,memory,70001141,absl::flat_hash_map,2287411200 -random,time,80000000,absl::flat_hash_map,11.765000 -random,memory,80002276,absl::flat_hash_map,2287411200 -random,time,90000000,absl::flat_hash_map,13.235000 -random,memory,90011453,absl::flat_hash_map,2287411200 -random,time,100000000,absl::flat_hash_map,14.683000 -random,memory,0,absl::parallel_flat_hash_map_mt,8097792 -random,memory,226683,absl::parallel_flat_hash_map_mt,8101888 -random,memory,226684,absl::parallel_flat_hash_map_mt,9236480 -random,memory,233565,absl::parallel_flat_hash_map_mt,9236480 -random,memory,233647,absl::parallel_flat_hash_map_mt,12922880 -random,memory,274958,absl::parallel_flat_hash_map_mt,13205504 -random,memory,454560,absl::parallel_flat_hash_map_mt,13205504 -random,memory,454569,absl::parallel_flat_hash_map_mt,15454208 -random,memory,458135,absl::parallel_flat_hash_map_mt,15454208 -random,memory,458136,absl::parallel_flat_hash_map_mt,20520960 -random,memory,464748,absl::parallel_flat_hash_map_mt,20520960 -random,memory,464749,absl::parallel_flat_hash_map_mt,23891968 -random,memory,585537,absl::parallel_flat_hash_map_mt,24461312 -random,memory,904457,absl::parallel_flat_hash_map_mt,24461312 -random,memory,904458,absl::parallel_flat_hash_map_mt,25587712 -random,memory,906298,absl::parallel_flat_hash_map_mt,25587712 -random,memory,906305,absl::parallel_flat_hash_map_mt,22061056 -random,memory,912070,absl::parallel_flat_hash_map_mt,22061056 -random,memory,912071,absl::parallel_flat_hash_map_mt,25481216 -random,memory,913175,absl::parallel_flat_hash_map_mt,25481216 
-random,memory,913176,absl::parallel_flat_hash_map_mt,28229632 -random,memory,916122,absl::parallel_flat_hash_map_mt,28229632 -random,memory,916123,absl::parallel_flat_hash_map_mt,29917184 -random,memory,917759,absl::parallel_flat_hash_map_mt,29917184 -random,memory,917760,absl::parallel_flat_hash_map_mt,31617024 -random,memory,921180,absl::parallel_flat_hash_map_mt,31617024 -random,memory,921181,absl::parallel_flat_hash_map_mt,34426880 -random,memory,923206,absl::parallel_flat_hash_map_mt,34426880 -random,memory,923207,absl::parallel_flat_hash_map_mt,37249024 -random,memory,932268,absl::parallel_flat_hash_map_mt,37249024 -random,memory,932269,absl::parallel_flat_hash_map_mt,39493632 -random,memory,960264,absl::parallel_flat_hash_map_mt,39972864 -random,memory,1818080,absl::parallel_flat_hash_map_mt,39972864 -random,memory,1818081,absl::parallel_flat_hash_map_mt,42213376 -random,memory,1818368,absl::parallel_flat_hash_map_mt,43331584 -random,memory,1824622,absl::parallel_flat_hash_map_mt,44445696 -random,memory,1825724,absl::parallel_flat_hash_map_mt,44445696 -random,memory,1825725,absl::parallel_flat_hash_map_mt,46682112 -random,memory,1827670,absl::parallel_flat_hash_map_mt,47800320 -random,memory,1829247,absl::parallel_flat_hash_map_mt,47800320 -random,memory,1829248,absl::parallel_flat_hash_map_mt,52269056 -random,memory,1833459,absl::parallel_flat_hash_map_mt,52264960 -random,memory,1833460,absl::parallel_flat_hash_map_mt,56733696 -random,memory,1835812,absl::parallel_flat_hash_map_mt,56729600 -random,memory,1835813,absl::parallel_flat_hash_map_mt,60080128 -random,memory,1836350,absl::parallel_flat_hash_map_mt,60080128 -random,memory,1836351,absl::parallel_flat_hash_map_mt,63438848 -random,memory,1837412,absl::parallel_flat_hash_map_mt,64548864 -random,memory,1838938,absl::parallel_flat_hash_map_mt,64548864 -random,memory,1838939,absl::parallel_flat_hash_map_mt,66777088 -random,memory,1839594,absl::parallel_flat_hash_map_mt,67891200 -random,memory,1841385,absl::parallel_flat_hash_map_mt,67891200 -random,memory,1841386,absl::parallel_flat_hash_map_mt,70123520 -random,memory,1843499,absl::parallel_flat_hash_map_mt,70123520 -random,memory,1843500,absl::parallel_flat_hash_map_mt,74596352 -random,memory,1851800,absl::parallel_flat_hash_map_mt,76824576 -random,memory,1881249,absl::parallel_flat_hash_map_mt,76824576 -random,memory,1881322,absl::parallel_flat_hash_map_mt,75706368 -random,memory,3642650,absl::parallel_flat_hash_map_mt,75706368 -random,memory,3642651,absl::parallel_flat_hash_map_mt,80175104 -random,memory,3650189,absl::parallel_flat_hash_map_mt,82411520 -random,memory,3658302,absl::parallel_flat_hash_map_mt,84652032 -random,memory,3659792,absl::parallel_flat_hash_map_mt,86884352 -random,memory,3660327,absl::parallel_flat_hash_map_mt,89116672 -random,memory,3662326,absl::parallel_flat_hash_map_mt,89116672 -random,memory,3662327,absl::parallel_flat_hash_map_mt,95813632 -random,memory,3666371,absl::parallel_flat_hash_map_mt,95817728 -random,memory,3666372,absl::parallel_flat_hash_map_mt,102522880 -random,memory,3667092,absl::parallel_flat_hash_map_mt,104767488 -random,memory,3667485,absl::parallel_flat_hash_map_mt,104767488 -random,memory,3667486,absl::parallel_flat_hash_map_mt,111464448 -random,memory,3668276,absl::parallel_flat_hash_map_mt,111464448 -random,memory,3668277,absl::parallel_flat_hash_map_mt,115920896 -random,memory,3670350,absl::parallel_flat_hash_map_mt,118145024 -random,memory,3671585,absl::parallel_flat_hash_map_mt,120381440 
-random,memory,3672959,absl::parallel_flat_hash_map_mt,122617856 -random,memory,3673809,absl::parallel_flat_hash_map_mt,122617856 -random,memory,3673810,absl::parallel_flat_hash_map_mt,129314816 -random,memory,3675897,absl::parallel_flat_hash_map_mt,131547136 -random,memory,3676555,absl::parallel_flat_hash_map_mt,133783552 -random,memory,3676779,absl::parallel_flat_hash_map_mt,136015872 -random,memory,3677839,absl::parallel_flat_hash_map_mt,138244096 -random,memory,3678355,absl::parallel_flat_hash_map_mt,140472320 -random,memory,3678866,absl::parallel_flat_hash_map_mt,142700544 -random,memory,3690202,absl::parallel_flat_hash_map_mt,142700544 -random,memory,3690203,absl::parallel_flat_hash_map_mt,149397504 -random,memory,3722970,absl::parallel_flat_hash_map_mt,149393408 -random,memory,3723091,absl::parallel_flat_hash_map_mt,147161088 -random,memory,7304291,absl::parallel_flat_hash_map_mt,147161088 -random,memory,7304305,absl::parallel_flat_hash_map_mt,156094464 -random,memory,7317618,absl::parallel_flat_hash_map_mt,160567296 -random,memory,7321953,absl::parallel_flat_hash_map_mt,165027840 -random,memory,7324203,absl::parallel_flat_hash_map_mt,169492480 -random,memory,7327095,absl::parallel_flat_hash_map_mt,173953024 -random,memory,7329008,absl::parallel_flat_hash_map_mt,178417664 -random,memory,7330112,absl::parallel_flat_hash_map_mt,182886400 -random,memory,7330870,absl::parallel_flat_hash_map_mt,187346944 -random,memory,7332208,absl::parallel_flat_hash_map_mt,191815680 -random,memory,7332866,absl::parallel_flat_hash_map_mt,196280320 -random,memory,7333480,absl::parallel_flat_hash_map_mt,196280320 -random,memory,7333481,absl::parallel_flat_hash_map_mt,209686528 -random,memory,7336329,absl::parallel_flat_hash_map_mt,214155264 -random,memory,7337014,absl::parallel_flat_hash_map_mt,218624000 -random,memory,7338084,absl::parallel_flat_hash_map_mt,223092736 -random,memory,7338525,absl::parallel_flat_hash_map_mt,227553280 -random,memory,7340267,absl::parallel_flat_hash_map_mt,232026112 -random,memory,7340739,absl::parallel_flat_hash_map_mt,236490752 -random,memory,7342416,absl::parallel_flat_hash_map_mt,240955392 -random,memory,7343877,absl::parallel_flat_hash_map_mt,245415936 -random,memory,7348892,absl::parallel_flat_hash_map_mt,245415936 -random,memory,7348911,absl::parallel_flat_hash_map_mt,240947200 -random,memory,7349331,absl::parallel_flat_hash_map_mt,240947200 -random,memory,7349332,absl::parallel_flat_hash_map_mt,249876480 -random,memory,7349765,absl::parallel_flat_hash_map_mt,254345216 -random,memory,7349943,absl::parallel_flat_hash_map_mt,258818048 -random,memory,7351616,absl::parallel_flat_hash_map_mt,263278592 -random,memory,7352328,absl::parallel_flat_hash_map_mt,267739136 -random,memory,7352954,absl::parallel_flat_hash_map_mt,272199680 -random,memory,7353831,absl::parallel_flat_hash_map_mt,276664320 -random,memory,7360307,absl::parallel_flat_hash_map_mt,281124864 -random,memory,7360322,absl::parallel_flat_hash_map_mt,281124864 -random,memory,7360340,absl::parallel_flat_hash_map_mt,276656128 -random,memory,7362083,absl::parallel_flat_hash_map_mt,276656128 -random,memory,7362084,absl::parallel_flat_hash_map_mt,285589504 -random,memory,7368696,absl::parallel_flat_hash_map_mt,290050048 -random,memory,7370511,absl::parallel_flat_hash_map_mt,294510592 -random,memory,7545581,absl::parallel_flat_hash_map_mt,294510592 -random,memory,7545600,absl::parallel_flat_hash_map_mt,290041856 -random,time,10000000,absl::parallel_flat_hash_map_mt,0.685000 
-random,memory,10125610,absl::parallel_flat_hash_map_mt,290045952 -random,memory,14624490,absl::parallel_flat_hash_map_mt,290045952 -random,memory,14624491,absl::parallel_flat_hash_map_mt,307908608 -random,memory,14640456,absl::parallel_flat_hash_map_mt,316846080 -random,memory,14645771,absl::parallel_flat_hash_map_mt,325771264 -random,memory,14646996,absl::parallel_flat_hash_map_mt,334704640 -random,memory,14657141,absl::parallel_flat_hash_map_mt,334704640 -random,memory,14657247,absl::parallel_flat_hash_map_mt,325775360 -random,memory,14658963,absl::parallel_flat_hash_map_mt,325775360 -random,memory,14658964,absl::parallel_flat_hash_map_mt,343642112 -random,memory,14659931,absl::parallel_flat_hash_map_mt,352571392 -random,memory,14665267,absl::parallel_flat_hash_map_mt,352571392 -random,memory,14665268,absl::parallel_flat_hash_map_mt,379363328 -random,memory,14670387,absl::parallel_flat_hash_map_mt,379363328 -random,memory,14670388,absl::parallel_flat_hash_map_mt,397221888 -random,memory,14671763,absl::parallel_flat_hash_map_mt,397221888 -random,memory,14671764,absl::parallel_flat_hash_map_mt,424009728 -random,memory,14677622,absl::parallel_flat_hash_map_mt,432939008 -random,memory,14680084,absl::parallel_flat_hash_map_mt,441864192 -random,memory,14680689,absl::parallel_flat_hash_map_mt,450789376 -random,memory,14683195,absl::parallel_flat_hash_map_mt,459726848 -random,memory,14683950,absl::parallel_flat_hash_map_mt,468652032 -random,memory,14688089,absl::parallel_flat_hash_map_mt,477581312 -random,memory,14691193,absl::parallel_flat_hash_map_mt,486514688 -random,memory,14694936,absl::parallel_flat_hash_map_mt,495443968 -random,memory,14696388,absl::parallel_flat_hash_map_mt,504377344 -random,memory,14697737,absl::parallel_flat_hash_map_mt,513314816 -random,memory,14698140,absl::parallel_flat_hash_map_mt,522248192 -random,memory,14699618,absl::parallel_flat_hash_map_mt,531177472 -random,memory,14700009,absl::parallel_flat_hash_map_mt,540110848 -random,memory,14700466,absl::parallel_flat_hash_map_mt,540110848 -random,memory,14700467,absl::parallel_flat_hash_map_mt,566910976 -random,memory,14716491,absl::parallel_flat_hash_map_mt,575832064 -random,memory,14739622,absl::parallel_flat_hash_map_mt,584757248 -random,memory,14831157,absl::parallel_flat_hash_map_mt,584757248 -random,memory,14831255,absl::parallel_flat_hash_map_mt,575823872 -random,time,20000000,absl::parallel_flat_hash_map_mt,1.386000 -random,memory,20162525,absl::parallel_flat_hash_map_mt,575811584 -random,memory,29290166,absl::parallel_flat_hash_map_mt,575811584 -random,memory,29290167,absl::parallel_flat_hash_map_mt,611536896 -random,memory,29290166,absl::parallel_flat_hash_map_mt,611536896 -random,memory,29290167,absl::parallel_flat_hash_map_mt,647262208 -random,memory,29308217,absl::parallel_flat_hash_map_mt,665128960 -random,memory,29316853,absl::parallel_flat_hash_map_mt,682983424 -random,memory,29322818,absl::parallel_flat_hash_map_mt,682983424 -random,memory,29322819,absl::parallel_flat_hash_map_mt,736567296 -random,memory,29335771,absl::parallel_flat_hash_map_mt,754425856 -random,memory,29341930,absl::parallel_flat_hash_map_mt,772284416 -random,memory,29343145,absl::parallel_flat_hash_map_mt,790142976 -random,memory,29350232,absl::parallel_flat_hash_map_mt,808005632 -random,memory,29354162,absl::parallel_flat_hash_map_mt,825868288 -random,memory,29356843,absl::parallel_flat_hash_map_mt,843722752 -random,memory,29362327,absl::parallel_flat_hash_map_mt,843722752 
-random,memory,29362363,absl::parallel_flat_hash_map_mt,825860096 -random,memory,29366343,absl::parallel_flat_hash_map_mt,825860096 -random,memory,29366344,absl::parallel_flat_hash_map_mt,861585408 -random,memory,29369094,absl::parallel_flat_hash_map_mt,879448064 -random,memory,29370268,absl::parallel_flat_hash_map_mt,897310720 -random,memory,29370522,absl::parallel_flat_hash_map_mt,897310720 -random,memory,29370585,absl::parallel_flat_hash_map_mt,879452160 -random,memory,29371240,absl::parallel_flat_hash_map_mt,879452160 -random,memory,29371241,absl::parallel_flat_hash_map_mt,915177472 -random,memory,29377637,absl::parallel_flat_hash_map_mt,933036032 -random,memory,29378552,absl::parallel_flat_hash_map_mt,950894592 -random,memory,29380612,absl::parallel_flat_hash_map_mt,968757248 -random,memory,29382146,absl::parallel_flat_hash_map_mt,986615808 -random,memory,29383649,absl::parallel_flat_hash_map_mt,1004474368 -random,memory,29386774,absl::parallel_flat_hash_map_mt,1022328832 -random,memory,29391253,absl::parallel_flat_hash_map_mt,1040187392 -random,memory,29392067,absl::parallel_flat_hash_map_mt,1058045952 -random,memory,29393223,absl::parallel_flat_hash_map_mt,1075908608 -random,memory,29397664,absl::parallel_flat_hash_map_mt,1093763072 -random,memory,29397820,absl::parallel_flat_hash_map_mt,1111621632 -random,memory,29402968,absl::parallel_flat_hash_map_mt,1129480192 -random,memory,29416940,absl::parallel_flat_hash_map_mt,1147346944 -random,memory,29425034,absl::parallel_flat_hash_map_mt,1165201408 -random,memory,29437495,absl::parallel_flat_hash_map_mt,1165201408 -random,memory,29437615,absl::parallel_flat_hash_map_mt,1147342848 -random,time,30000000,absl::parallel_flat_hash_map_mt,2.397000 -random,memory,30000064,absl::parallel_flat_hash_map_mt,1147179008 -random,time,40000000,absl::parallel_flat_hash_map_mt,2.894000 -random,memory,40092788,absl::parallel_flat_hash_map_mt,1147342848 -random,time,50000000,absl::parallel_flat_hash_map_mt,3.419000 -random,memory,50016621,absl::parallel_flat_hash_map_mt,1147351040 -random,memory,58617730,absl::parallel_flat_hash_map_mt,1147351040 -random,memory,58617731,absl::parallel_flat_hash_map_mt,1218797568 -random,memory,58624447,absl::parallel_flat_hash_map_mt,1254522880 -random,memory,58647163,absl::parallel_flat_hash_map_mt,1290244096 -random,memory,58652294,absl::parallel_flat_hash_map_mt,1290244096 -random,memory,58652331,absl::parallel_flat_hash_map_mt,1254518784 -random,memory,58657858,absl::parallel_flat_hash_map_mt,1254518784 -random,memory,58657859,absl::parallel_flat_hash_map_mt,1325965312 -random,memory,58672238,absl::parallel_flat_hash_map_mt,1361690624 -random,memory,58684143,absl::parallel_flat_hash_map_mt,1361690624 -random,memory,58684144,absl::parallel_flat_hash_map_mt,1468854272 -random,memory,58686216,absl::parallel_flat_hash_map_mt,1504571392 -random,memory,58705047,absl::parallel_flat_hash_map_mt,1540292608 -random,memory,58707030,absl::parallel_flat_hash_map_mt,1576017920 -random,memory,58713785,absl::parallel_flat_hash_map_mt,1611739136 -random,memory,58719051,absl::parallel_flat_hash_map_mt,1647460352 -random,memory,58719339,absl::parallel_flat_hash_map_mt,1683181568 -random,memory,58721442,absl::parallel_flat_hash_map_mt,1718906880 -random,memory,58722510,absl::parallel_flat_hash_map_mt,1754628096 -random,memory,58731866,absl::parallel_flat_hash_map_mt,1790353408 -random,memory,58732950,absl::parallel_flat_hash_map_mt,1826086912 -random,memory,58733354,absl::parallel_flat_hash_map_mt,1861804032 
-random,memory,58734852,absl::parallel_flat_hash_map_mt,1897525248 -random,memory,58735358,absl::parallel_flat_hash_map_mt,1933242368 -random,memory,58739746,absl::parallel_flat_hash_map_mt,1968967680 -random,memory,58739993,absl::parallel_flat_hash_map_mt,2004692992 -random,memory,58745310,absl::parallel_flat_hash_map_mt,2040410112 -random,memory,58746926,absl::parallel_flat_hash_map_mt,2076135424 -random,memory,58747125,absl::parallel_flat_hash_map_mt,2111860736 -random,memory,58756624,absl::parallel_flat_hash_map_mt,2147586048 -random,memory,58769646,absl::parallel_flat_hash_map_mt,2183303168 -random,memory,58771866,absl::parallel_flat_hash_map_mt,2219024384 -random,memory,58789875,absl::parallel_flat_hash_map_mt,2254741504 -random,memory,58798355,absl::parallel_flat_hash_map_mt,2290466816 -random,memory,58824040,absl::parallel_flat_hash_map_mt,2326183936 -random,memory,59024387,absl::parallel_flat_hash_map_mt,2326183936 -random,memory,59024466,absl::parallel_flat_hash_map_mt,2290458624 -random,time,60000000,absl::parallel_flat_hash_map_mt,4.973000 -random,memory,60000000,absl::parallel_flat_hash_map_mt,2290229248 -random,time,70000000,absl::parallel_flat_hash_map_mt,5.544000 -random,memory,70133805,absl::parallel_flat_hash_map_mt,2290458624 -random,time,80000000,absl::parallel_flat_hash_map_mt,6.054000 -random,memory,80033533,absl::parallel_flat_hash_map_mt,2290458624 -random,time,90000000,absl::parallel_flat_hash_map_mt,6.624000 -random,memory,90023483,absl::parallel_flat_hash_map_mt,2290458624 -random,time,100000000,absl::parallel_flat_hash_map_mt,7.175000 diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/results/output_mt_par_only b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/results/output_mt_par_only deleted file mode 100644 index 30ed8e4..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/results/output_mt_par_only +++ /dev/null @@ -1,104 +0,0 @@ -random,memory,0,absl::parallel_flat_hash_map,41910272 -random,memory,924422,absl::parallel_flat_hash_map,41910272 -random,memory,924433,absl::parallel_flat_hash_map,44146688 -random,memory,952903,absl::parallel_flat_hash_map,44146688 -random,memory,953013,absl::parallel_flat_hash_map,40779776 -random,memory,1010078,absl::parallel_flat_hash_map,40779776 -random,memory,1010200,absl::parallel_flat_hash_map,38539264 -random,memory,1863512,absl::parallel_flat_hash_map,38539264 -random,memory,1863540,absl::parallel_flat_hash_map,85454848 -random,memory,2941624,absl::parallel_flat_hash_map,85454848 -random,memory,2941816,absl::parallel_flat_hash_map,74272768 -random,memory,3683917,absl::parallel_flat_hash_map,74272768 -random,memory,3683918,absl::parallel_flat_hash_map,154685440 -random,memory,3692980,absl::parallel_flat_hash_map,154685440 -random,memory,3693011,absl::parallel_flat_hash_map,168075264 -random,memory,4474443,absl::parallel_flat_hash_map,168075264 -random,memory,4474556,absl::parallel_flat_hash_map,145719296 -random,memory,6754095,absl::parallel_flat_hash_map,145707008 -random,memory,6754147,absl::parallel_flat_hash_map,226099200 -random,memory,7294556,absl::parallel_flat_hash_map,226099200 -random,memory,7294670,absl::parallel_flat_hash_map,235036672 -random,memory,7983489,absl::parallel_flat_hash_map,235036672 -random,memory,7983651,absl::parallel_flat_hash_map,261820416 -random,memory,8242552,absl::parallel_flat_hash_map,261820416 -random,memory,8242691,absl::parallel_flat_hash_map,252887040 -random,memory,8949833,absl::parallel_flat_hash_map,252887040 
-random,memory,8949892,absl::parallel_flat_hash_map,305504256 -random,memory,8987991,absl::parallel_flat_hash_map,305475584 -random,memory,8988323,absl::parallel_flat_hash_map,296538112 -random,memory,9004502,absl::parallel_flat_hash_map,296538112 -random,memory,9004594,absl::parallel_flat_hash_map,287604736 -random,time,10000000,absl::parallel_flat_hash_map,0.471000 -random,memory,10862319,absl::parallel_flat_hash_map,287756288 -random,memory,14670115,absl::parallel_flat_hash_map,287756288 -random,memory,14670116,absl::parallel_flat_hash_map,573566976 -random,memory,14678079,absl::parallel_flat_hash_map,573566976 -random,memory,14678102,absl::parallel_flat_hash_map,627142656 -random,memory,14690163,absl::parallel_flat_hash_map,627142656 -random,memory,14690164,absl::parallel_flat_hash_map,716455936 -random,memory,15024573,absl::parallel_flat_hash_map,716455936 -random,memory,15024686,absl::parallel_flat_hash_map,591388672 -random,memory,15476837,absl::parallel_flat_hash_map,591388672 -random,memory,15476994,absl::parallel_flat_hash_map,573521920 -random,time,20000000,absl::parallel_flat_hash_map,0.956000 -random,memory,20410280,absl::parallel_flat_hash_map,573497344 -random,memory,29273379,absl::parallel_flat_hash_map,573497344 -random,memory,29273380,absl::parallel_flat_hash_map,1073627136 -random,memory,29353638,absl::parallel_flat_hash_map,1073627136 -random,memory,29353639,absl::parallel_flat_hash_map,1252241408 -random,memory,29686553,absl::parallel_flat_hash_map,1287847936 -random,memory,29688294,absl::parallel_flat_hash_map,1287847936 -random,memory,29688316,absl::parallel_flat_hash_map,1252122624 -random,memory,29707720,absl::parallel_flat_hash_map,1252122624 -random,memory,29707784,absl::parallel_flat_hash_map,1180663808 -random,memory,29924480,absl::parallel_flat_hash_map,1180577792 -random,memory,29924509,absl::parallel_flat_hash_map,1144856576 -random,time,30000000,absl::parallel_flat_hash_map,1.567000 -random,memory,30708549,absl::parallel_flat_hash_map,1145061376 -random,time,40000000,absl::parallel_flat_hash_map,1.993000 -random,memory,40790845,absl::parallel_flat_hash_map,1145049088 -random,time,50000000,absl::parallel_flat_hash_map,2.423000 -random,memory,50009479,absl::parallel_flat_hash_map,1145065472 -random,memory,58110357,absl::parallel_flat_hash_map,1145065472 -random,memory,58110423,absl::parallel_flat_hash_map,1716617216 -random,memory,58545607,absl::parallel_flat_hash_map,1716617216 -random,memory,58545651,absl::parallel_flat_hash_map,1859506176 -random,memory,58671713,absl::parallel_flat_hash_map,1859506176 -random,memory,58672142,absl::parallel_flat_hash_map,2002403328 -random,memory,58697955,absl::parallel_flat_hash_map,2002403328 -random,memory,58697956,absl::parallel_flat_hash_map,2288181248 -random,memory,58704727,absl::parallel_flat_hash_map,2288181248 -random,memory,58704728,absl::parallel_flat_hash_map,2359627776 -random,memory,58705597,absl::parallel_flat_hash_map,2359627776 -random,memory,58705598,absl::parallel_flat_hash_map,2288181248 -random,memory,58730957,absl::parallel_flat_hash_map,2288181248 -random,memory,58730958,absl::parallel_flat_hash_map,2573950976 -random,memory,58736077,absl::parallel_flat_hash_map,2645401600 -random,memory,58736169,absl::parallel_flat_hash_map,2716848128 -random,memory,58742702,absl::parallel_flat_hash_map,2716848128 -random,memory,58742703,absl::parallel_flat_hash_map,2859741184 -random,memory,58787870,absl::parallel_flat_hash_map,2859741184 -random,memory,58787918,absl::parallel_flat_hash_map,2716848128 
-random,memory,58863920,absl::parallel_flat_hash_map,2716848128 -random,memory,58863994,absl::parallel_flat_hash_map,2645397504 -random,memory,59087411,absl::parallel_flat_hash_map,2645397504 -random,memory,59087521,absl::parallel_flat_hash_map,2573955072 -random,memory,59355340,absl::parallel_flat_hash_map,2573877248 -random,memory,59355391,absl::parallel_flat_hash_map,2502426624 -random,memory,59387965,absl::parallel_flat_hash_map,2502426624 -random,memory,59388025,absl::parallel_flat_hash_map,2430980096 -random,memory,59484220,absl::parallel_flat_hash_map,2430951424 -random,memory,59484283,absl::parallel_flat_hash_map,2359500800 -random,memory,59503076,absl::parallel_flat_hash_map,2359500800 -random,memory,59503156,absl::parallel_flat_hash_map,2288058368 -random,time,60000000,absl::parallel_flat_hash_map,3.220000 -random,memory,60745902,absl::parallel_flat_hash_map,2288160768 -random,time,70000000,absl::parallel_flat_hash_map,3.671000 -random,memory,70722318,absl::parallel_flat_hash_map,2288156672 -random,time,80000000,absl::parallel_flat_hash_map,4.159000 -random,memory,80002281,absl::parallel_flat_hash_map,2288119808 -random,time,90000000,absl::parallel_flat_hash_map,4.615000 -random,memory,90715643,absl::parallel_flat_hash_map,2288152576 -random,time,100000000,absl::parallel_flat_hash_map,5.128000 -random,memory,100000000,absl::parallel_flat_hash_map,2287927296 diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/results/output_mt_stl_flat_par b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/results/output_mt_stl_flat_par deleted file mode 100644 index e6b94d5..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/results/output_mt_stl_flat_par +++ /dev/null @@ -1,446 +0,0 @@ -random,memory,0,std::unordered_map,6217728 -random,memory,81839,std::unordered_map,6217728 -random,memory,81866,std::unordered_map,7081984 -random,memory,96791,std::unordered_map,7081984 -random,memory,96811,std::unordered_map,7938048 -random,memory,108492,std::unordered_map,7938048 -random,memory,108510,std::unordered_map,8482816 -random,memory,119431,std::unordered_map,8482816 -random,memory,119443,std::unordered_map,9027584 -random,memory,127648,std::unordered_map,9027584 -random,memory,127666,std::unordered_map,9420800 -random,memory,131071,std::unordered_map,9420800 -random,memory,131072,std::unordered_map,11681792 -random,memory,140589,std::unordered_map,11681792 -random,memory,140596,std::unordered_map,12238848 -random,memory,152717,std::unordered_map,12238848 -random,memory,152732,std::unordered_map,12861440 -random,memory,164819,std::unordered_map,12861440 -random,memory,164832,std::unordered_map,13570048 -random,memory,176210,std::unordered_map,13570048 -random,memory,176219,std::unordered_map,14192640 -random,memory,185598,std::unordered_map,14581760 -random,memory,195438,std::unordered_map,14581760 -random,memory,195443,std::unordered_map,15126528 -random,memory,206045,std::unordered_map,15126528 -random,memory,206062,std::unordered_map,15675392 -random,memory,216715,std::unordered_map,15675392 -random,memory,216726,std::unordered_map,16220160 -random,memory,227698,std::unordered_map,16220160 -random,memory,227712,std::unordered_map,16764928 -random,memory,238365,std::unordered_map,16764928 -random,memory,238381,std::unordered_map,17309696 -random,memory,248089,std::unordered_map,17309696 -random,memory,248095,std::unordered_map,17858560 -random,memory,257442,std::unordered_map,18325504 -random,memory,262143,std::unordered_map,18325504 
-random,memory,262144,std::unordered_map,26894336 -random,memory,262143,std::unordered_map,26894336 -random,memory,262144,std::unordered_map,22691840 -random,memory,273217,std::unordered_map,23314432 -random,memory,284202,std::unordered_map,23937024 -random,memory,294934,std::unordered_map,24403968 -random,memory,304770,std::unordered_map,24948736 -random,memory,314973,std::unordered_map,25497600 -random,memory,325228,std::unordered_map,26046464 -random,memory,335546,std::unordered_map,26591232 -random,memory,344776,std::unordered_map,26980352 -random,memory,353536,std::unordered_map,27525120 -random,memory,362203,std::unordered_map,27996160 -random,memory,371418,std::unordered_map,28385280 -random,memory,380977,std::unordered_map,28852224 -random,memory,390397,std::unordered_map,29396992 -random,memory,399764,std::unordered_map,29863936 -random,memory,409260,std::unordered_map,30334976 -random,memory,418252,std::unordered_map,30801920 -random,memory,426340,std::unordered_map,31191040 -random,memory,434549,std::unordered_map,31657984 -random,memory,443151,std::unordered_map,32129024 -random,memory,451873,std::unordered_map,32518144 -random,memory,460714,std::unordered_map,32985088 -random,memory,469657,std::unordered_map,33476608 -random,memory,478601,std::unordered_map,33865728 -random,memory,487440,std::unordered_map,34336768 -random,memory,494742,std::unordered_map,34725888 -random,memory,502423,std::unordered_map,35192832 -random,memory,518619,std::unordered_map,35971072 -random,memory,524287,std::unordered_map,35971072 -random,memory,524288,std::unordered_map,53104640 -random,memory,524287,std::unordered_map,53104640 -random,memory,524288,std::unordered_map,44699648 -random,memory,549272,std::unordered_map,45944832 -random,memory,558487,std::unordered_map,46411776 -random,memory,568160,std::unordered_map,46882816 -random,memory,577422,std::unordered_map,47427584 -random,memory,605424,std::unordered_map,48828416 -random,memory,640487,std::unordered_map,50622464 -random,memory,649784,std::unordered_map,51171328 -random,memory,684975,std::unordered_map,52965376 -random,memory,718904,std::unordered_map,54677504 -random,memory,753470,std::unordered_map,56393728 -random,memory,787320,std::unordered_map,58187776 -random,memory,828012,std::unordered_map,60215296 -random,memory,868058,std::unordered_map,62320640 -random,memory,906661,std::unordered_map,64348160 -random,memory,945911,std::unordered_map,66297856 -random,memory,990443,std::unordered_map,68644864 -random,memory,1036174,std::unordered_map,70983680 -random,memory,1048575,std::unordered_map,71294976 -random,memory,1048576,std::unordered_map,105234432 -random,memory,1048575,std::unordered_map,105234432 -random,memory,1048576,std::unordered_map,88424448 -random,memory,1100160,std::unordered_map,91078656 -random,memory,1160386,std::unordered_map,94117888 -random,memory,1221920,std::unordered_map,97316864 -random,memory,1282706,std::unordered_map,100433920 -random,memory,1342726,std::unordered_map,103477248 -random,memory,1401981,std::unordered_map,106594304 -random,memory,1464762,std::unordered_map,109793280 -random,memory,1530705,std::unordered_map,113143808 -random,memory,1603780,std::unordered_map,116887552 -random,memory,1674989,std::unordered_map,120631296 -random,memory,1752908,std::unordered_map,124608512 -random,memory,1826840,std::unordered_map,128352256 -random,memory,1901645,std::unordered_map,132247552 -random,memory,1981814,std::unordered_map,136306688 -random,memory,2068859,std::unordered_map,140754944 
-random,memory,2097151,std::unordered_map,141922304 -random,memory,2097152,std::unordered_map,209481728 -random,memory,2097151,std::unordered_map,209481728 -random,memory,2097152,std::unordered_map,175861760 -random,memory,2207315,std::unordered_map,181477376 -random,memory,2312577,std::unordered_map,186937344 -random,memory,2423407,std::unordered_map,192659456 -random,memory,2540989,std::unordered_map,198664192 -random,memory,2659046,std::unordered_map,204750848 -random,memory,2780893,std::unordered_map,210989056 -random,memory,2908456,std::unordered_map,217616384 -random,memory,3037917,std::unordered_map,224169984 -random,memory,3170468,std::unordered_map,230952960 -random,memory,3306278,std::unordered_map,237973504 -random,memory,3450118,std::unordered_map,245383168 -random,memory,3598043,std::unordered_map,252985344 -random,memory,3747662,std::unordered_map,260706304 -random,memory,3904906,std::unordered_map,268820480 -random,memory,4066463,std::unordered_map,277086208 -random,memory,4194303,std::unordered_map,283484160 -random,memory,4194304,std::unordered_map,418127872 -random,memory,4194303,std::unordered_map,418127872 -random,memory,4194304,std::unordered_map,350887936 -random,memory,4406007,std::unordered_map,361725952 -random,memory,4618998,std::unordered_map,372645888 -random,memory,4841808,std::unordered_map,384188416 -random,memory,5068021,std::unordered_map,395784192 -random,memory,5303596,std::unordered_map,407875584 -random,memory,5541719,std::unordered_map,420122624 -random,memory,5791497,std::unordered_map,432910336 -random,memory,6050086,std::unordered_map,446242816 -random,memory,6317526,std::unordered_map,459972608 -random,memory,6591123,std::unordered_map,474009600 -random,memory,6871888,std::unordered_map,488439808 -random,memory,7157069,std::unordered_map,503103488 -random,memory,7455408,std::unordered_map,518389760 -random,memory,7759096,std::unordered_map,533991424 -random,memory,8071668,std::unordered_map,550060032 -random,memory,8388607,std::unordered_map,566202368 -random,memory,8388608,std::unordered_map,835330048 -random,memory,8388607,std::unordered_map,835330048 -random,memory,8388608,std::unordered_map,700846080 -random,memory,8800929,std::unordered_map,721985536 -random,memory,9229377,std::unordered_map,743981056 -random,memory,9669903,std::unordered_map,766681088 -random,time,10000000,std::unordered_map,4.058000 -random,memory,10006234,std::unordered_map,783917056 -random,memory,10471995,std::unordered_map,807780352 -random,memory,10944895,std::unordered_map,832118784 -random,memory,11437088,std::unordered_map,857387008 -random,memory,11942425,std::unordered_map,883281920 -random,memory,12462221,std::unordered_map,910032896 -random,memory,12997032,std::unordered_map,937488384 -random,memory,13549619,std::unordered_map,965877760 -random,memory,14116358,std::unordered_map,994889728 -random,memory,14695755,std::unordered_map,1024761856 -random,memory,15302036,std::unordered_map,1055805440 -random,memory,15922424,std::unordered_map,1087705088 -random,memory,16564203,std::unordered_map,1120698368 -random,memory,16777216,std::unordered_map,1131618304 -random,memory,16777217,std::unordered_map,1669545984 -random,memory,16777216,std::unordered_map,1669545984 -random,memory,16777217,std::unordered_map,1400582144 -random,memory,17598216,std::unordered_map,1442697216 -random,memory,18446165,std::unordered_map,1486299136 -random,memory,19317877,std::unordered_map,1531068416 -random,time,20000000,std::unordered_map,8.836000 
-random,memory,20005214,std::unordered_map,1566400512 -random,memory,20925154,std::unordered_map,1613586432 -random,memory,21874581,std::unordered_map,1662332928 -random,memory,22851058,std::unordered_map,1712484352 -random,memory,23854188,std::unordered_map,1764044800 -random,memory,24889194,std::unordered_map,1817235456 -random,memory,25956856,std::unordered_map,1871986688 -random,memory,27052917,std::unordered_map,1928302592 -random,memory,28182525,std::unordered_map,1986330624 -random,memory,29343496,std::unordered_map,2045997056 -random,time,30000000,std::unordered_map,12.189000 -random,memory,30003110,std::unordered_map,2079850496 -random,memory,31219769,std::unordered_map,2142253056 -random,memory,32472749,std::unordered_map,2206601216 -random,memory,33554433,std::unordered_map,2262134784 -random,memory,33554434,std::unordered_map,3338063872 -random,memory,33554433,std::unordered_map,3338063872 -random,memory,33554434,std::unordered_map,2800144384 -random,memory,35192104,std::unordered_map,2884296704 -random,memory,36882738,std::unordered_map,2971029504 -random,memory,38624262,std::unordered_map,3060490240 -random,time,40000000,std::unordered_map,19.469000 -random,memory,40004148,std::unordered_map,3131314176 -random,memory,41836642,std::unordered_map,3225452544 -random,memory,43723699,std::unordered_map,3322322944 -random,memory,45664059,std::unordered_map,3422003200 -random,memory,47663952,std::unordered_map,3524804608 -random,memory,49729822,std::unordered_map,3630796800 -random,time,50000000,std::unordered_map,22.945000 -random,memory,50002422,std::unordered_map,3644837888 -random,memory,52133946,std::unordered_map,3754262528 -random,memory,54331297,std::unordered_map,3867045888 -random,memory,56594458,std::unordered_map,3983339520 -random,memory,58922279,std::unordered_map,4102909952 -random,time,60000000,std::unordered_map,26.724001 -random,memory,60000504,std::unordered_map,4158287872 -random,memory,62432538,std::unordered_map,4283162624 -random,memory,64938021,std::unordered_map,4411858944 -random,memory,67108867,std::unordered_map,4523159552 -random,memory,67108868,std::unordered_map,6675017728 -random,memory,67108867,std::unordered_map,6674984960 -random,memory,67108868,std::unordered_map,5599145984 -random,time,70000000,std::unordered_map,38.929001 -random,memory,70005239,std::unordered_map,5747884032 -random,memory,73363452,std::unordered_map,5920329728 -random,memory,76824572,std::unordered_map,6098083840 -random,time,80000000,std::unordered_map,42.421001 -random,memory,80004058,std::unordered_map,6261411840 -random,memory,83663065,std::unordered_map,6449299456 -random,memory,87431119,std::unordered_map,6642888704 -random,time,90000000,std::unordered_map,46.064999 -random,memory,90003941,std::unordered_map,6775009280 -random,memory,93967227,std::unordered_map,6978502656 -random,memory,98046859,std::unordered_map,7188000768 -random,time,100000000,std::unordered_map,49.808998 -random,memory,0,absl::flat_hash_map,15753216 -random,memory,246268,absl::flat_hash_map,15753216 -random,memory,246423,absl::flat_hash_map,11280384 -random,memory,458751,absl::flat_hash_map,11280384 -random,memory,458752,absl::flat_hash_map,29143040 -random,memory,494344,absl::flat_hash_map,29143040 -random,memory,494581,absl::flat_hash_map,20209664 -random,memory,587568,absl::flat_hash_map,20209664 -random,memory,587757,absl::flat_hash_map,19087360 -random,memory,917503,absl::flat_hash_map,19087360 -random,memory,917504,absl::flat_hash_map,54812672 -random,memory,933203,absl::flat_hash_map,54812672 
-random,memory,933351,absl::flat_hash_map,36950016 -random,memory,1835007,absl::flat_hash_map,36950016 -random,memory,1835008,absl::flat_hash_map,108396544 -random,memory,1862555,absl::flat_hash_map,108396544 -random,memory,1862669,absl::flat_hash_map,72671232 -random,memory,3670015,absl::flat_hash_map,72671232 -random,memory,3670016,absl::flat_hash_map,215560192 -random,memory,3671239,absl::flat_hash_map,215560192 -random,memory,3671292,absl::flat_hash_map,144113664 -random,memory,7340031,absl::flat_hash_map,144113664 -random,memory,7340032,absl::flat_hash_map,429895680 -random,memory,7357262,absl::flat_hash_map,429895680 -random,memory,7357325,absl::flat_hash_map,287002624 -random,time,10000000,absl::flat_hash_map,0.917000 -random,memory,10004410,absl::flat_hash_map,287002624 -random,memory,14680063,absl::flat_hash_map,287002624 -random,memory,14680064,absl::flat_hash_map,858546176 -random,memory,14692106,absl::flat_hash_map,858546176 -random,memory,14692158,absl::flat_hash_map,572772352 -random,time,20000000,absl::flat_hash_map,2.219000 -random,memory,20000770,absl::flat_hash_map,572772352 -random,memory,29360127,absl::flat_hash_map,572772352 -random,memory,29360128,absl::flat_hash_map,1715863552 -random,memory,29364265,absl::flat_hash_map,1715863552 -random,memory,29364312,absl::flat_hash_map,1144311808 -random,time,30000000,absl::flat_hash_map,3.875000 -random,memory,30003827,absl::flat_hash_map,1144311808 -random,time,40000000,absl::flat_hash_map,5.254000 -random,memory,40009000,absl::flat_hash_map,1144311808 -random,time,50000000,absl::flat_hash_map,6.636000 -random,memory,50012195,absl::flat_hash_map,1144311808 -random,memory,58720255,absl::flat_hash_map,1144311808 -random,memory,58720256,absl::flat_hash_map,3430490112 -random,memory,58721997,absl::flat_hash_map,3430490112 -random,memory,58722035,absl::flat_hash_map,2287411200 -random,time,60000000,absl::flat_hash_map,9.076000 -random,memory,60006216,absl::flat_hash_map,2287411200 -random,time,70000000,absl::flat_hash_map,10.597000 -random,memory,70010624,absl::flat_hash_map,2287411200 -random,time,80000000,absl::flat_hash_map,12.121000 -random,memory,80009799,absl::flat_hash_map,2287411200 -random,time,90000000,absl::flat_hash_map,13.627000 -random,memory,90004802,absl::flat_hash_map,2287411200 -random,time,100000000,absl::flat_hash_map,15.121000 -random,memory,0,absl::parallel_flat_hash_map,10170368 -random,memory,411848,absl::parallel_flat_hash_map,10170368 -random,memory,411907,absl::parallel_flat_hash_map,16375808 -random,memory,434804,absl::parallel_flat_hash_map,16375808 -random,memory,434919,absl::parallel_flat_hash_map,19169280 -random,memory,499955,absl::parallel_flat_hash_map,19169280 -random,memory,500092,absl::parallel_flat_hash_map,18636800 -random,memory,767798,absl::parallel_flat_hash_map,18636800 -random,memory,767958,absl::parallel_flat_hash_map,20873216 -random,memory,810538,absl::parallel_flat_hash_map,20873216 -random,memory,810597,absl::parallel_flat_hash_map,30941184 -random,memory,819492,absl::parallel_flat_hash_map,30941184 -random,memory,819493,absl::parallel_flat_hash_map,39882752 -random,memory,835988,absl::parallel_flat_hash_map,39882752 -random,memory,836052,absl::parallel_flat_hash_map,36519936 -random,memory,894649,absl::parallel_flat_hash_map,36519936 -random,memory,894754,absl::parallel_flat_hash_map,34279424 -random,memory,1154846,absl::parallel_flat_hash_map,34242560 -random,memory,1154988,absl::parallel_flat_hash_map,35360768 -random,memory,1216254,absl::parallel_flat_hash_map,35360768 
-random,memory,1216391,absl::parallel_flat_hash_map,36552704 -random,memory,1583771,absl::parallel_flat_hash_map,36552704 -random,memory,1583947,absl::parallel_flat_hash_map,39907328 -random,memory,1622124,absl::parallel_flat_hash_map,39907328 -random,memory,1622237,absl::parallel_flat_hash_map,52199424 -random,memory,1639638,absl::parallel_flat_hash_map,52199424 -random,memory,1639661,absl::parallel_flat_hash_map,65605632 -random,memory,1653514,absl::parallel_flat_hash_map,65605632 -random,memory,1653539,absl::parallel_flat_hash_map,72306688 -random,memory,1666888,absl::parallel_flat_hash_map,72306688 -random,memory,1666916,absl::parallel_flat_hash_map,79003648 -random,memory,1691750,absl::parallel_flat_hash_map,79003648 -random,memory,1691824,absl::parallel_flat_hash_map,72306688 -random,memory,1738807,absl::parallel_flat_hash_map,72306688 -random,memory,1738925,absl::parallel_flat_hash_map,65593344 -random,memory,2222488,absl::parallel_flat_hash_map,65593344 -random,memory,2222655,absl::parallel_flat_hash_map,70066176 -random,memory,2275719,absl::parallel_flat_hash_map,70066176 -random,memory,2275807,absl::parallel_flat_hash_map,72294400 -random,memory,2319482,absl::parallel_flat_hash_map,72294400 -random,memory,2319616,absl::parallel_flat_hash_map,76759040 -random,memory,2370330,absl::parallel_flat_hash_map,76759040 -random,memory,2370869,absl::parallel_flat_hash_map,74518528 -random,memory,2422380,absl::parallel_flat_hash_map,74518528 -random,memory,2422622,absl::parallel_flat_hash_map,76754944 -random,memory,2471942,absl::parallel_flat_hash_map,76754944 -random,memory,2472109,absl::parallel_flat_hash_map,74518528 -random,memory,3212727,absl::parallel_flat_hash_map,74518528 -random,memory,3212832,absl::parallel_flat_hash_map,92385280 -random,memory,3279060,absl::parallel_flat_hash_map,92385280 -random,memory,3279135,absl::parallel_flat_hash_map,110256128 -random,memory,3330438,absl::parallel_flat_hash_map,110256128 -random,memory,3330505,absl::parallel_flat_hash_map,119193600 -random,memory,3357017,absl::parallel_flat_hash_map,119193600 -random,memory,3357061,absl::parallel_flat_hash_map,123658240 -random,memory,3382717,absl::parallel_flat_hash_map,123658240 -random,memory,3382766,absl::parallel_flat_hash_map,128126976 -random,memory,3410216,absl::parallel_flat_hash_map,128126976 -random,memory,3410294,absl::parallel_flat_hash_map,119193600 -random,memory,3487891,absl::parallel_flat_hash_map,119193600 -random,memory,3487995,absl::parallel_flat_hash_map,114720768 -random,memory,3535204,absl::parallel_flat_hash_map,114720768 -random,memory,3535343,absl::parallel_flat_hash_map,110252032 -random,memory,3806272,absl::parallel_flat_hash_map,110252032 -random,memory,3806419,absl::parallel_flat_hash_map,119189504 -random,memory,3851844,absl::parallel_flat_hash_map,119189504 -random,memory,3851947,absl::parallel_flat_hash_map,137056256 -random,memory,3912789,absl::parallel_flat_hash_map,137056256 -random,memory,3912899,absl::parallel_flat_hash_map,141520896 -random,memory,3936842,absl::parallel_flat_hash_map,141520896 -random,memory,3936909,absl::parallel_flat_hash_map,150458368 -random,memory,3959815,absl::parallel_flat_hash_map,150458368 -random,memory,3959905,absl::parallel_flat_hash_map,159383552 -random,memory,4023374,absl::parallel_flat_hash_map,159383552 -random,memory,4023538,absl::parallel_flat_hash_map,150446080 -random,memory,4065053,absl::parallel_flat_hash_map,154906624 -random,memory,4204140,absl::parallel_flat_hash_map,154906624 
-random,memory,4204297,absl::parallel_flat_hash_map,150437888 -random,memory,4301074,absl::parallel_flat_hash_map,150437888 -random,memory,4301249,absl::parallel_flat_hash_map,145965056 -random,memory,6549677,absl::parallel_flat_hash_map,145965056 -random,memory,6549825,absl::parallel_flat_hash_map,181694464 -random,memory,6997079,absl::parallel_flat_hash_map,181694464 -random,memory,6997211,absl::parallel_flat_hash_map,208494592 -random,memory,7230339,absl::parallel_flat_hash_map,208494592 -random,memory,7230465,absl::parallel_flat_hash_map,226361344 -random,memory,7291549,absl::parallel_flat_hash_map,226361344 -random,memory,7291670,absl::parallel_flat_hash_map,217427968 -random,memory,7657520,absl::parallel_flat_hash_map,217427968 -random,memory,7657607,absl::parallel_flat_hash_map,297807872 -random,memory,7804692,absl::parallel_flat_hash_map,297807872 -random,memory,7804756,absl::parallel_flat_hash_map,315654144 -random,memory,7862884,absl::parallel_flat_hash_map,324587520 -random,memory,8300446,absl::parallel_flat_hash_map,324587520 -random,memory,8300602,absl::parallel_flat_hash_map,288845824 -random,time,10000000,absl::parallel_flat_hash_map,0.424000 -random,memory,10549382,absl::parallel_flat_hash_map,287694848 -random,memory,14509630,absl::parallel_flat_hash_map,287694848 -random,memory,14509631,absl::parallel_flat_hash_map,537772032 -random,memory,14625205,absl::parallel_flat_hash_map,537772032 -random,memory,14625206,absl::parallel_flat_hash_map,662798336 -random,memory,14985538,absl::parallel_flat_hash_map,662798336 -random,memory,14985695,absl::parallel_flat_hash_map,573476864 -random,memory,15812726,absl::parallel_flat_hash_map,573476864 -random,memory,15812791,absl::parallel_flat_hash_map,591331328 -random,memory,16545857,absl::parallel_flat_hash_map,591331328 -random,memory,16545973,absl::parallel_flat_hash_map,573460480 -random,time,20000000,absl::parallel_flat_hash_map,0.954000 -random,memory,20807098,absl::parallel_flat_hash_map,573456384 -random,memory,29087356,absl::parallel_flat_hash_map,573456384 -random,memory,29087477,absl::parallel_flat_hash_map,644902912 -random,memory,29349303,absl::parallel_flat_hash_map,644902912 -random,memory,29349304,absl::parallel_flat_hash_map,1145028608 -random,memory,29350502,absl::parallel_flat_hash_map,1145028608 -random,memory,29350503,absl::parallel_flat_hash_map,1180753920 -random,memory,29353744,absl::parallel_flat_hash_map,1180753920 -random,memory,29353745,absl::parallel_flat_hash_map,1252196352 -random,memory,29356269,absl::parallel_flat_hash_map,1252196352 -random,memory,29356270,absl::parallel_flat_hash_map,1216475136 -random,memory,29371063,absl::parallel_flat_hash_map,1216475136 -random,memory,29371064,absl::parallel_flat_hash_map,1430810624 -random,memory,29406162,absl::parallel_flat_hash_map,1430810624 -random,memory,29406217,absl::parallel_flat_hash_map,1359364096 -random,memory,29624052,absl::parallel_flat_hash_map,1359364096 -random,memory,29624117,absl::parallel_flat_hash_map,1216425984 -random,memory,29711436,absl::parallel_flat_hash_map,1216397312 -random,memory,29711557,absl::parallel_flat_hash_map,1180667904 -random,memory,29779052,absl::parallel_flat_hash_map,1180643328 -random,memory,29779149,absl::parallel_flat_hash_map,1144913920 -random,time,30000000,absl::parallel_flat_hash_map,1.510000 -random,memory,30000000,absl::parallel_flat_hash_map,1144786944 -random,time,40000000,absl::parallel_flat_hash_map,1.977000 -random,memory,40000000,absl::parallel_flat_hash_map,1144782848 
-random,time,50000000,absl::parallel_flat_hash_map,2.410000 -random,memory,50109244,absl::parallel_flat_hash_map,1144987648 -random,memory,58696456,absl::parallel_flat_hash_map,1144987648 -random,memory,58696478,absl::parallel_flat_hash_map,2145222656 -random,memory,58705957,absl::parallel_flat_hash_map,2145222656 -random,memory,58705958,absl::parallel_flat_hash_map,2288111616 -random,memory,58710284,absl::parallel_flat_hash_map,2288111616 -random,memory,58710285,absl::parallel_flat_hash_map,2502451200 -random,memory,58716154,absl::parallel_flat_hash_map,2502451200 -random,memory,58716155,absl::parallel_flat_hash_map,2645327872 -random,memory,58719191,absl::parallel_flat_hash_map,2645327872 -random,memory,58719214,absl::parallel_flat_hash_map,2573885440 -random,memory,58734799,absl::parallel_flat_hash_map,2573885440 -random,memory,58734800,absl::parallel_flat_hash_map,2859651072 -random,memory,58754516,absl::parallel_flat_hash_map,2859651072 -random,memory,58754559,absl::parallel_flat_hash_map,2716762112 -random,memory,59072502,absl::parallel_flat_hash_map,2716762112 -random,memory,59072581,absl::parallel_flat_hash_map,2359533568 -random,memory,59445636,absl::parallel_flat_hash_map,2359533568 -random,memory,59449345,absl::parallel_flat_hash_map,2288054272 -random,time,60000000,absl::parallel_flat_hash_map,3.184000 -random,memory,60659819,absl::parallel_flat_hash_map,2288070656 -random,time,70000000,absl::parallel_flat_hash_map,3.678000 -random,memory,70001013,absl::parallel_flat_hash_map,2288001024 -random,time,80000000,absl::parallel_flat_hash_map,4.120000 -random,memory,80008641,absl::parallel_flat_hash_map,2288046080 -random,time,90000000,absl::parallel_flat_hash_map,4.653000 -random,memory,90000000,absl::parallel_flat_hash_map,2287878144 -random,time,100000000,absl::parallel_flat_hash_map,5.111000 diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/results/output_mt_stl_flat_par_run2 b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/results/output_mt_stl_flat_par_run2 deleted file mode 100644 index d01e475..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/results/output_mt_stl_flat_par_run2 +++ /dev/null @@ -1,374 +0,0 @@ -random,memory,0,std::unordered_map,6262784 -random,memory,66147,std::unordered_map,6340608 -random,memory,82637,std::unordered_map,6340608 -random,memory,82649,std::unordered_map,7204864 -random,memory,99619,std::unordered_map,7204864 -random,memory,99639,std::unordered_map,8060928 -random,memory,116159,std::unordered_map,8060928 -random,memory,116174,std::unordered_map,8916992 -random,memory,129704,std::unordered_map,8916992 -random,memory,129718,std::unordered_map,9621504 -random,memory,131071,std::unordered_map,9621504 -random,memory,131072,std::unordered_map,11804672 -random,memory,135283,std::unordered_map,12038144 -random,memory,147679,std::unordered_map,12038144 -random,memory,147680,std::unordered_map,12660736 -random,memory,159818,std::unordered_map,12660736 -random,memory,159834,std::unordered_map,13291520 -random,memory,170700,std::unordered_map,13291520 -random,memory,170717,std::unordered_map,13914112 -random,memory,179486,std::unordered_map,14303232 -random,memory,188398,std::unordered_map,14303232 -random,memory,188409,std::unordered_map,14770176 -random,memory,195927,std::unordered_map,14770176 -random,memory,195939,std::unordered_map,15237120 -random,memory,203959,std::unordered_map,15630336 -random,memory,211779,std::unordered_map,15941632 -random,memory,220483,std::unordered_map,15941632 
-random,memory,220484,std::unordered_map,16486400 -random,memory,229922,std::unordered_map,16953344 -random,memory,239746,std::unordered_map,17424384 -random,memory,250042,std::unordered_map,17424384 -random,memory,250053,std::unordered_map,17969152 -random,memory,259821,std::unordered_map,18436096 -random,memory,262143,std::unordered_map,18436096 -random,memory,262144,std::unordered_map,26927104 -random,memory,262143,std::unordered_map,26927104 -random,memory,262144,std::unordered_map,22724608 -random,memory,273337,std::unordered_map,23347200 -random,memory,281584,std::unordered_map,23814144 -random,memory,291318,std::unordered_map,24293376 -random,memory,301478,std::unordered_map,24838144 -random,memory,311977,std::unordered_map,25382912 -random,memory,322715,std::unordered_map,25935872 -random,memory,333302,std::unordered_map,26558464 -random,memory,343264,std::unordered_map,27025408 -random,memory,352924,std::unordered_map,27492352 -random,memory,361958,std::unordered_map,27963392 -random,memory,369865,std::unordered_map,28352512 -random,memory,378535,std::unordered_map,28819456 -random,memory,387443,std::unordered_map,29286400 -random,memory,396689,std::unordered_map,29753344 -random,memory,405932,std::unordered_map,30224384 -random,memory,415263,std::unordered_map,30691328 -random,memory,424445,std::unordered_map,31158272 -random,memory,433724,std::unordered_map,31625216 -random,memory,442183,std::unordered_map,32096256 -random,memory,450170,std::unordered_map,32485376 -random,memory,458338,std::unordered_map,32874496 -random,memory,466025,std::unordered_map,33341440 -random,memory,474254,std::unordered_map,33730560 -random,memory,482593,std::unordered_map,34123776 -random,memory,491058,std::unordered_map,34590720 -random,memory,499360,std::unordered_map,35057664 -random,memory,514720,std::unordered_map,35758080 -random,memory,522349,std::unordered_map,36147200 -random,memory,524287,std::unordered_map,36147200 -random,memory,524288,std::unordered_map,53125120 -random,memory,524287,std::unordered_map,53125120 -random,memory,524288,std::unordered_map,44720128 -random,memory,543462,std::unordered_map,45654016 -random,memory,552986,std::unordered_map,46198784 -random,memory,572111,std::unordered_map,47136768 -random,memory,608007,std::unordered_map,49004544 -random,memory,626830,std::unordered_map,49938432 -random,memory,662501,std::unordered_map,51838976 -random,memory,697143,std::unordered_map,53633024 -random,memory,730779,std::unordered_map,55349248 -random,memory,763966,std::unordered_map,57061376 -random,memory,797419,std::unordered_map,58777600 -random,memory,838262,std::unordered_map,60805120 -random,memory,877693,std::unordered_map,62910464 -random,memory,919839,std::unordered_map,65015808 -random,memory,963409,std::unordered_map,67280896 -random,memory,1006880,std::unordered_map,69541888 -random,memory,1048575,std::unordered_map,71491584 -random,memory,1048576,std::unordered_map,105275392 -random,memory,1048575,std::unordered_map,105279488 -random,memory,1048576,std::unordered_map,88469504 -random,memory,1101898,std::unordered_map,91201536 -random,memory,1159041,std::unordered_map,94085120 -random,memory,1214581,std::unordered_map,96968704 -random,memory,1277500,std::unordered_map,100163584 -random,memory,1336144,std::unordered_map,103211008 -random,memory,1401705,std::unordered_map,106561536 -random,memory,1467192,std::unordered_map,109993984 -random,memory,1539228,std::unordered_map,113659904 -random,memory,1610540,std::unordered_map,117321728 
-random,memory,1679692,std::unordered_map,120909824 -random,memory,1754775,std::unordered_map,124731392 -random,memory,1829692,std::unordered_map,128552960 -random,memory,1911333,std::unordered_map,132685824 -random,memory,1992045,std::unordered_map,136900608 -random,memory,2078160,std::unordered_map,141344768 -random,memory,2097151,std::unordered_map,141967360 -random,memory,2097152,std::unordered_map,209526784 -random,memory,2097151,std::unordered_map,209526784 -random,memory,2097152,std::unordered_map,175906816 -random,memory,2203649,std::unordered_map,181366784 -random,memory,2313060,std::unordered_map,187060224 -random,memory,2430763,std::unordered_map,193064960 -random,memory,2547134,std::unordered_map,199041024 -random,memory,2663349,std::unordered_map,205049856 -random,memory,2783850,std::unordered_map,211288064 -random,memory,2910198,std::unordered_map,217763840 -random,memory,3037927,std::unordered_map,224313344 -random,memory,3177861,std::unordered_map,231485440 -random,memory,3313832,std::unordered_map,238505984 -random,memory,3457523,std::unordered_map,245760000 -random,memory,3602506,std::unordered_map,253329408 -random,memory,3759506,std::unordered_map,261279744 -random,memory,3911955,std::unordered_map,269160448 -random,memory,4070796,std::unordered_map,277348352 -random,memory,4194303,std::unordered_map,283435008 -random,memory,4194304,std::unordered_map,418156544 -random,memory,4194303,std::unordered_map,418156544 -random,memory,4194304,std::unordered_map,350916608 -random,memory,4407009,std::unordered_map,361832448 -random,memory,4622523,std::unordered_map,372908032 -random,memory,4840785,std::unordered_map,384139264 -random,memory,5069285,std::unordered_map,395759616 -random,memory,5303878,std::unordered_map,407928832 -random,memory,5543266,std::unordered_map,420253696 -random,memory,5791025,std::unordered_map,432885760 -random,memory,6044095,std::unordered_map,445906944 -random,memory,6305319,std::unordered_map,459321344 -random,memory,6577545,std::unordered_map,473284608 -random,memory,6858697,std::unordered_map,487792640 -random,memory,7147317,std::unordered_map,502611968 -random,memory,7442937,std::unordered_map,517742592 -random,memory,7748304,std::unordered_map,533422080 -random,memory,8065615,std::unordered_map,549801984 -random,memory,8388607,std::unordered_map,566177792 -random,memory,8388608,std::unordered_map,835301376 -random,memory,8388607,std::unordered_map,835301376 -random,memory,8388608,std::unordered_map,700817408 -random,memory,8802366,std::unordered_map,722030592 -random,memory,9226003,std::unordered_map,743714816 -random,memory,9659613,std::unordered_map,766099456 -random,time,10000000,std::unordered_map,4.219000 -random,memory,10004203,std::unordered_map,783728640 -random,memory,10467953,std::unordered_map,807591936 -random,memory,10941688,std::unordered_map,831930368 -random,memory,11430458,std::unordered_map,856965120 -random,memory,11932272,std::unordered_map,882782208 -random,memory,12452228,std::unordered_map,909455360 -random,memory,12986599,std::unordered_map,936914944 -random,memory,13534295,std::unordered_map,965074944 -random,memory,14099120,std::unordered_map,994086912 -random,memory,14685277,std::unordered_map,1024192512 -random,memory,15284333,std::unordered_map,1055002624 -random,memory,15901280,std::unordered_map,1086668800 -random,memory,16540739,std::unordered_map,1119506432 -random,memory,16777216,std::unordered_map,1131593728 -random,memory,16777217,std::unordered_map,1669521408 
-random,memory,16777216,std::unordered_map,1669521408 -random,memory,16777217,std::unordered_map,1400561664 -random,memory,17594843,std::unordered_map,1442594816 -random,memory,18441168,std::unordered_map,1486041088 -random,memory,19314823,std::unordered_map,1530888192 -random,time,20000000,std::unordered_map,9.237000 -random,memory,20001449,std::unordered_map,1566220288 -random,memory,20922961,std::unordered_map,1613488128 -random,memory,21867605,std::unordered_map,1661919232 -random,memory,22842566,std::unordered_map,1711992832 -random,memory,23846371,std::unordered_map,1763553280 -random,memory,24877882,std::unordered_map,1816588288 -random,memory,25939795,std::unordered_map,1871106048 -random,memory,27035961,std::unordered_map,1927417856 -random,memory,28166332,std::unordered_map,1985449984 -random,memory,29325566,std::unordered_map,2045038592 -random,time,30000000,std::unordered_map,12.731000 -random,memory,30005312,std::unordered_map,2079903744 -random,memory,31220485,std::unordered_map,2142306304 -random,memory,32473699,std::unordered_map,2206654464 -random,memory,33554433,std::unordered_map,2261954560 -random,memory,33554434,std::unordered_map,3338039296 -random,memory,33554433,std::unordered_map,3338039296 -random,memory,33554434,std::unordered_map,2800119808 -random,memory,35190517,std::unordered_map,2884198400 -random,memory,36881331,std::unordered_map,2971004928 -random,memory,38618963,std::unordered_map,3060154368 -random,time,40000000,std::unordered_map,20.341999 -random,memory,40000165,std::unordered_map,3131133952 -random,memory,41832534,std::unordered_map,3225194496 -random,memory,43716435,std::unordered_map,3321987072 -random,memory,45660968,std::unordered_map,3421900800 -random,memory,47664978,std::unordered_map,3524857856 -random,memory,49728995,std::unordered_map,3630772224 -random,time,50000000,std::unordered_map,23.892000 -random,memory,50003393,std::unordered_map,3644891136 -random,memory,52133603,std::unordered_map,3754237952 -random,memory,54328284,std::unordered_map,3866943488 -random,memory,56590586,std::unordered_map,3983159296 -random,memory,58922512,std::unordered_map,4102963200 -random,time,60000000,std::unordered_map,27.804001 -random,memory,60002789,std::unordered_map,4158418944 -random,memory,62434781,std::unordered_map,4283215872 -random,memory,64938804,std::unordered_map,4411912192 -random,memory,67108867,std::unordered_map,4523261952 -random,memory,67108868,std::unordered_map,6674964480 -random,memory,67108867,std::unordered_map,6674964480 -random,memory,67108868,std::unordered_map,5599125504 -random,time,70000000,std::unordered_map,40.429001 -random,memory,70000893,std::unordered_map,5747630080 -random,memory,73361782,std::unordered_map,5920231424 -random,memory,76823810,std::unordered_map,6098063360 -random,time,80000000,std::unordered_map,43.907001 -random,memory,80001253,std::unordered_map,6261313536 -random,memory,83662519,std::unordered_map,6449278976 -random,memory,87433081,std::unordered_map,6642946048 -random,time,90000000,std::unordered_map,47.674000 -random,memory,90000620,std::unordered_map,6774833152 -random,memory,93961521,std::unordered_map,6978170880 -random,memory,98041926,std::unordered_map,7187746816 -random,time,100000000,std::unordered_map,51.557999 -random,memory,100000000,std::unordered_map,7288283136 -random,memory,0,phmap::flat_hash_map,15810560 -random,memory,239818,phmap::flat_hash_map,15810560 -random,memory,240039,phmap::flat_hash_map,11341824 -random,memory,458751,phmap::flat_hash_map,11341824 
-random,memory,458752,phmap::flat_hash_map,29208576 -random,memory,487182,phmap::flat_hash_map,29208576 -random,memory,487416,phmap::flat_hash_map,20275200 -random,memory,669554,phmap::flat_hash_map,20275200 -random,memory,669793,phmap::flat_hash_map,19144704 -random,memory,917503,phmap::flat_hash_map,19144704 -random,memory,917504,phmap::flat_hash_map,54870016 -random,memory,956900,phmap::flat_hash_map,54870016 -random,memory,957064,phmap::flat_hash_map,37003264 -random,memory,1835007,phmap::flat_hash_map,37003264 -random,memory,1835008,phmap::flat_hash_map,108449792 -random,memory,1849262,phmap::flat_hash_map,108449792 -random,memory,1849368,phmap::flat_hash_map,72724480 -random,memory,3670015,phmap::flat_hash_map,72724480 -random,memory,3670016,phmap::flat_hash_map,215613440 -random,memory,3671196,phmap::flat_hash_map,215613440 -random,memory,3671252,phmap::flat_hash_map,144166912 -random,memory,7340031,phmap::flat_hash_map,144166912 -random,memory,7340032,phmap::flat_hash_map,429944832 -random,memory,7345416,phmap::flat_hash_map,429944832 -random,memory,7345476,phmap::flat_hash_map,287055872 -random,time,10000000,phmap::flat_hash_map,0.952000 -random,memory,10005890,phmap::flat_hash_map,287055872 -random,memory,14680063,phmap::flat_hash_map,287055872 -random,memory,14680064,phmap::flat_hash_map,858599424 -random,memory,14681371,phmap::flat_hash_map,858599424 -random,memory,14681406,phmap::flat_hash_map,572825600 -random,time,20000000,phmap::flat_hash_map,2.315000 -random,memory,20016264,phmap::flat_hash_map,572825600 -random,memory,29360127,phmap::flat_hash_map,572825600 -random,memory,29360128,phmap::flat_hash_map,1715916800 -random,memory,29363718,phmap::flat_hash_map,1715916800 -random,memory,29363764,phmap::flat_hash_map,1144373248 -random,time,30000000,phmap::flat_hash_map,4.047000 -random,memory,30001212,phmap::flat_hash_map,1144373248 -random,time,40000000,phmap::flat_hash_map,5.437000 -random,memory,40013884,phmap::flat_hash_map,1144373248 -random,time,50000000,phmap::flat_hash_map,6.807000 -random,memory,50012642,phmap::flat_hash_map,1144373248 -random,memory,58720255,phmap::flat_hash_map,1144373248 -random,memory,58720256,phmap::flat_hash_map,3430551552 -random,memory,58726692,phmap::flat_hash_map,3430551552 -random,memory,58726725,phmap::flat_hash_map,2287464448 -random,time,60000000,phmap::flat_hash_map,9.232000 -random,memory,60005561,phmap::flat_hash_map,2287464448 -random,time,70000000,phmap::flat_hash_map,10.811000 -random,memory,70008847,phmap::flat_hash_map,2287464448 -random,time,80000000,phmap::flat_hash_map,12.387000 -random,memory,80000522,phmap::flat_hash_map,2287464448 -random,time,90000000,phmap::flat_hash_map,13.936000 -random,memory,90006732,phmap::flat_hash_map,2287464448 -random,time,100000000,phmap::flat_hash_map,15.467000 -random,memory,0,phmap::parallel_flat_hash_map,37785600 -random,memory,1921864,phmap::parallel_flat_hash_map,37785600 -random,memory,1921990,phmap::parallel_flat_hash_map,75739136 -random,memory,2969317,phmap::parallel_flat_hash_map,75739136 -random,memory,2969478,phmap::parallel_flat_hash_map,73498624 -random,memory,3714056,phmap::parallel_flat_hash_map,73498624 -random,memory,3714057,phmap::parallel_flat_hash_map,162828288 -random,memory,4501128,phmap::parallel_flat_hash_map,162828288 -random,memory,4501312,phmap::parallel_flat_hash_map,144936960 -random,memory,7228404,phmap::parallel_flat_hash_map,144936960 -random,memory,7228507,phmap::parallel_flat_hash_map,207466496 -random,memory,7275817,phmap::parallel_flat_hash_map,207466496 
-random,memory,7275940,phmap::parallel_flat_hash_map,216391680 -random,memory,7498901,phmap::parallel_flat_hash_map,216391680 -random,memory,7499009,phmap::parallel_flat_hash_map,323563520 -random,memory,7543411,phmap::parallel_flat_hash_map,323563520 -random,memory,7543523,phmap::parallel_flat_hash_map,305696768 -random,memory,8354331,phmap::parallel_flat_hash_map,305696768 -random,memory,8354519,phmap::parallel_flat_hash_map,287825920 -random,time,10000000,phmap::parallel_flat_hash_map,0.406000 -random,memory,10849279,phmap::parallel_flat_hash_map,288239616 -random,memory,14474156,phmap::parallel_flat_hash_map,288239616 -random,memory,14474206,phmap::parallel_flat_hash_map,395419648 -random,memory,14565308,phmap::parallel_flat_hash_map,395419648 -random,memory,14565382,phmap::parallel_flat_hash_map,466870272 -random,memory,14678635,phmap::parallel_flat_hash_map,466870272 -random,memory,14678636,phmap::parallel_flat_hash_map,609767424 -random,memory,14683611,phmap::parallel_flat_hash_map,627630080 -random,memory,14800987,phmap::parallel_flat_hash_map,627630080 -random,memory,14801039,phmap::parallel_flat_hash_map,663339008 -random,memory,15290395,phmap::parallel_flat_hash_map,663339008 -random,memory,15290450,phmap::parallel_flat_hash_map,591884288 -random,memory,15480090,phmap::parallel_flat_hash_map,591884288 -random,memory,15480237,phmap::parallel_flat_hash_map,574021632 -random,time,20000000,phmap::parallel_flat_hash_map,0.912000 -random,memory,20838704,phmap::parallel_flat_hash_map,574017536 -random,memory,28900745,phmap::parallel_flat_hash_map,574017536 -random,memory,28900840,phmap::parallel_flat_hash_map,645464064 -random,memory,29185725,phmap::parallel_flat_hash_map,645464064 -random,memory,29185801,phmap::parallel_flat_hash_map,788357120 -random,memory,29330003,phmap::parallel_flat_hash_map,788357120 -random,memory,29330055,phmap::parallel_flat_hash_map,1002696704 -random,memory,29349427,phmap::parallel_flat_hash_map,1002696704 -random,memory,29349428,phmap::parallel_flat_hash_map,1145589760 -random,memory,29352020,phmap::parallel_flat_hash_map,1145589760 -random,memory,29352021,phmap::parallel_flat_hash_map,1181310976 -random,memory,29355250,phmap::parallel_flat_hash_map,1181310976 -random,memory,29355251,phmap::parallel_flat_hash_map,1252745216 -random,memory,29363721,phmap::parallel_flat_hash_map,1252745216 -random,memory,29363722,phmap::parallel_flat_hash_map,1324191744 -random,memory,29370810,phmap::parallel_flat_hash_map,1324191744 -random,memory,29370811,phmap::parallel_flat_hash_map,1431351296 -random,memory,29553925,phmap::parallel_flat_hash_map,1431351296 -random,memory,29553998,phmap::parallel_flat_hash_map,1252732928 -random,memory,29747915,phmap::parallel_flat_hash_map,1252732928 -random,memory,29748035,phmap::parallel_flat_hash_map,1181249536 -random,memory,29849570,phmap::parallel_flat_hash_map,1181167616 -random,memory,29849635,phmap::parallel_flat_hash_map,1145438208 -random,time,30000000,phmap::parallel_flat_hash_map,1.470000 -random,memory,30044703,phmap::parallel_flat_hash_map,1145556992 -random,time,40000000,phmap::parallel_flat_hash_map,1.935000 -random,memory,40000000,phmap::parallel_flat_hash_map,1145356288 -random,time,50000000,phmap::parallel_flat_hash_map,2.359000 -random,memory,50622111,phmap::parallel_flat_hash_map,1145556992 -random,memory,58670362,phmap::parallel_flat_hash_map,1145556992 -random,memory,58670384,phmap::parallel_flat_hash_map,2145792000 -random,memory,58698634,phmap::parallel_flat_hash_map,2145792000 
-random,memory,58698635,phmap::parallel_flat_hash_map,2288680960 -random,memory,58715876,phmap::parallel_flat_hash_map,2288685056 -random,memory,58715877,phmap::parallel_flat_hash_map,2574458880 -random,memory,58720779,phmap::parallel_flat_hash_map,2574458880 -random,memory,58720802,phmap::parallel_flat_hash_map,2503016448 -random,memory,58723605,phmap::parallel_flat_hash_map,2503016448 -random,memory,58723606,phmap::parallel_flat_hash_map,2645905408 -random,memory,58729212,phmap::parallel_flat_hash_map,2645905408 -random,memory,58729213,phmap::parallel_flat_hash_map,2788790272 -random,memory,58741845,phmap::parallel_flat_hash_map,2860228608 -random,memory,58760201,phmap::parallel_flat_hash_map,2860228608 -random,memory,58760227,phmap::parallel_flat_hash_map,2788786176 -random,memory,58967162,phmap::parallel_flat_hash_map,2788786176 -random,memory,58967269,phmap::parallel_flat_hash_map,2360111104 -random,memory,59568547,phmap::parallel_flat_hash_map,2360111104 -random,memory,59568692,phmap::parallel_flat_hash_map,2288660480 -random,time,60000000,phmap::parallel_flat_hash_map,3.120000 -random,memory,60764467,phmap::parallel_flat_hash_map,2288656384 -random,time,70000000,phmap::parallel_flat_hash_map,3.567000 -random,memory,70747076,phmap::parallel_flat_hash_map,2288652288 -random,time,80000000,phmap::parallel_flat_hash_map,4.004000 -random,memory,80005300,phmap::parallel_flat_hash_map,2288590848 -random,time,90000000,phmap::parallel_flat_hash_map,4.527000 -random,memory,90699330,phmap::parallel_flat_hash_map,2288656384 -random,time,100000000,phmap::parallel_flat_hash_map,4.986000 diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/results/output_stl_flat b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/results/output_stl_flat deleted file mode 100644 index aee5272..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/results/output_stl_flat +++ /dev/null @@ -1,225 +0,0 @@ -random,memory,0,std::unordered_map,6688768 -random,memory,94189,std::unordered_map,7778304 -random,memory,109769,std::unordered_map,8556544 -random,memory,122654,std::unordered_map,9261056 -random,memory,131072,std::unordered_map,13778944 -random,memory,131072,std::unordered_map,11677696 -random,memory,139313,std::unordered_map,12144640 -random,memory,153935,std::unordered_map,12943360 -random,memory,165753,std::unordered_map,13643776 -random,memory,177309,std::unordered_map,14266368 -random,memory,188501,std::unordered_map,14733312 -random,memory,198715,std::unordered_map,15282176 -random,memory,208082,std::unordered_map,15749120 -random,memory,217961,std::unordered_map,16293888 -random,memory,228367,std::unordered_map,16838656 -random,memory,248349,std::unordered_map,17854464 -random,memory,262144,std::unordered_map,26890240 -random,memory,262144,std::unordered_map,22683648 -random,memory,276980,std::unordered_map,23543808 -random,memory,296991,std::unordered_map,24477696 -random,memory,317107,std::unordered_map,25571328 -random,memory,336773,std::unordered_map,26660864 -random,memory,356852,std::unordered_map,27676672 -random,memory,374674,std::unordered_map,28532736 -random,memory,393524,std::unordered_map,29544448 -random,memory,412757,std::unordered_map,30482432 -random,memory,430634,std::unordered_map,31416320 -random,memory,456096,std::unordered_map,32743424 -random,memory,480998,std::unordered_map,34017280 -random,memory,506761,std::unordered_map,35340288 -random,memory,524288,std::unordered_map,53096448 -random,memory,524288,std::unordered_map,44687360 
-random,memory,559809,std::unordered_map,46481408 -random,memory,586869,std::unordered_map,47882240 -random,memory,625664,std::unordered_map,49827840 -random,memory,661343,std::unordered_map,51781632 -random,memory,697780,std::unordered_map,53575680 -random,memory,732263,std::unordered_map,55369728 -random,memory,767346,std::unordered_map,57163776 -random,memory,801265,std::unordered_map,58880000 -random,memory,843401,std::unordered_map,61063168 -random,memory,883359,std::unordered_map,63008768 -random,memory,923703,std::unordered_map,65114112 -random,memory,962276,std::unordered_map,67145728 -random,memory,1000499,std::unordered_map,69173248 -random,memory,1046029,std::unordered_map,71438336 -random,memory,1048576,std::unordered_map,105222144 -random,memory,1048576,std::unordered_map,88412160 -random,memory,1103068,std::unordered_map,91217920 -random,memory,1166341,std::unordered_map,94412800 -random,memory,1227947,std::unordered_map,97611776 -random,memory,1288360,std::unordered_map,100732928 -random,memory,1348784,std::unordered_map,103849984 -random,memory,1415960,std::unordered_map,107282432 -random,memory,1483476,std::unordered_map,110710784 -random,memory,1551489,std::unordered_map,114221056 -random,memory,1622998,std::unordered_map,117964800 -random,memory,1698528,std::unordered_map,121786368 -random,memory,1774279,std::unordered_map,125607936 -random,memory,1852847,std::unordered_map,129658880 -random,memory,1935045,std::unordered_map,133955584 -random,memory,2016124,std::unordered_map,138084352 -random,memory,2097152,std::unordered_map,209465344 -random,memory,2097152,std::unordered_map,175845376 -random,memory,2202017,std::unordered_map,181227520 -random,memory,2308233,std::unordered_map,186687488 -random,memory,2421995,std::unordered_map,192569344 -random,memory,2540952,std::unordered_map,198647808 -random,memory,2664197,std::unordered_map,205049856 -random,memory,2786842,std::unordered_map,211288064 -random,memory,2914893,std::unordered_map,217915392 -random,memory,3042611,std::unordered_map,224464896 -random,memory,3177918,std::unordered_map,231325696 -random,memory,3316856,std::unordered_map,238583808 -random,memory,3463219,std::unordered_map,245989376 -random,memory,3606297,std::unordered_map,253440000 -random,memory,3758685,std::unordered_map,261234688 -random,memory,3918344,std::unordered_map,269426688 -random,memory,4082497,std::unordered_map,277848064 -random,memory,4194304,std::unordered_map,418111488 -random,memory,4194304,std::unordered_map,350871552 -random,memory,4405390,std::unordered_map,361631744 -random,memory,4622462,std::unordered_map,372862976 -random,memory,4843447,std::unordered_map,384253952 -random,memory,5077057,std::unordered_map,396234752 -random,memory,5310379,std::unordered_map,408170496 -random,memory,5553459,std::unordered_map,420728832 -random,memory,5801485,std::unordered_map,433438720 -random,memory,6061642,std::unordered_map,446771200 -random,memory,6321544,std::unordered_map,460189696 -random,memory,6597657,std::unordered_map,474308608 -random,memory,6874638,std::unordered_map,488579072 -random,memory,7165061,std::unordered_map,503480320 -random,memory,7464156,std::unordered_map,518844416 -random,memory,7769321,std::unordered_map,534519808 -random,memory,8083787,std::unordered_map,550666240 -random,memory,8388608,std::unordered_map,835313664 -random,memory,8388608,std::unordered_map,700833792 -random,memory,8799364,std::unordered_map,721883136 -random,memory,9228893,std::unordered_map,743878656 
-random,memory,9664620,std::unordered_map,766345216 -random,time,10000000,std::unordered_map,4.373000 -random,memory,10005035,std::unordered_map,783814656 -random,memory,10114472,std::unordered_map,789352448 -random,memory,10581340,std::unordered_map,813371392 -random,memory,11060026,std::unordered_map,837943296 -random,memory,11554760,std::unordered_map,863367168 -random,memory,12063672,std::unordered_map,889499648 -random,memory,12588048,std::unordered_map,916488192 -random,memory,13128601,std::unordered_map,944173056 -random,memory,13682065,std::unordered_map,972640256 -random,memory,14255923,std::unordered_map,1002127360 -random,memory,14847433,std::unordered_map,1032548352 -random,memory,15456818,std::unordered_map,1063825408 -random,memory,16080459,std::unordered_map,1095798784 -random,memory,16726977,std::unordered_map,1128947712 -random,memory,16777217,std::unordered_map,1669529600 -random,memory,16777217,std::unordered_map,1400569856 -random,memory,17595085,std::unordered_map,1442603008 -random,memory,18438183,std::unordered_map,1485893632 -random,memory,19311311,std::unordered_map,1530658816 -random,time,20000000,std::unordered_map,8.842000 -random,memory,20000810,std::unordered_map,1566150656 -random,memory,20207250,std::unordered_map,1576677376 -random,memory,21131235,std::unordered_map,1624178688 -random,memory,22086647,std::unordered_map,1673158656 -random,memory,23064920,std::unordered_map,1723465728 -random,memory,24073299,std::unordered_map,1775177728 -random,memory,25110516,std::unordered_map,1828532224 -random,memory,26181575,std::unordered_map,1883516928 -random,memory,27282467,std::unordered_map,1940062208 -random,memory,28422030,std::unordered_map,1998561280 -random,memory,29593778,std::unordered_map,2058694656 -random,time,30000000,std::unordered_map,12.018000 -random,memory,30000114,std::unordered_map,2079678464 -random,memory,30796939,std::unordered_map,2120552448 -random,memory,32039135,std::unordered_map,2184355840 -random,memory,33320240,std::unordered_map,2250104832 -random,memory,33554434,std::unordered_map,3338047488 -random,memory,33554434,std::unordered_map,2800128000 -random,memory,35194605,std::unordered_map,2884362240 -random,memory,36886268,std::unordered_map,2971246592 -random,memory,38624437,std::unordered_map,3060473856 -random,time,40000000,std::unordered_map,18.924999 -random,memory,40003628,std::unordered_map,3131297792 -random,memory,40417618,std::unordered_map,3152588800 -random,memory,42264600,std::unordered_map,3247431680 -random,memory,44165274,std::unordered_map,3345006592 -random,memory,46122181,std::unordered_map,3445542912 -random,memory,48137986,std::unordered_map,3549040640 -random,time,50000000,std::unordered_map,22.261999 -random,memory,50001657,std::unordered_map,3644821504 -random,memory,50216354,std::unordered_map,3655737344 -random,memory,52356080,std::unordered_map,3765637120 -random,memory,54556920,std::unordered_map,3878731776 -random,memory,56823702,std::unordered_map,3995103232 -random,memory,59156726,std::unordered_map,4114980864 -random,time,60000000,std::unordered_map,25.919001 -random,memory,60002957,std::unordered_map,4158427136 -random,memory,61562985,std::unordered_map,4238528512 -random,memory,64040759,std::unordered_map,4365746176 -random,memory,66594964,std::unordered_map,4496855040 -random,memory,67108868,std::unordered_map,6675001344 -random,memory,67108868,std::unordered_map,5599162368 -random,time,70000000,std::unordered_map,37.734001 -random,memory,70005322,std::unordered_map,5747867648 
-random,memory,70384719,std::unordered_map,5767364608 -random,memory,73758686,std::unordered_map,5940674560 -random,memory,77233925,std::unordered_map,6119124992 -random,time,80000000,std::unordered_map,41.104000 -random,memory,80002815,std::unordered_map,6261395456 -random,memory,80810186,std::unordered_map,6302810112 -random,memory,84496350,std::unordered_map,6492106752 -random,memory,88293603,std::unordered_map,6687174656 -random,time,90000000,std::unordered_map,44.681000 -random,memory,90004365,std::unordered_map,6774996992 -random,memory,92206220,std::unordered_map,6888091648 -random,memory,96235553,std::unordered_map,7095013376 -random,time,100000000,std::unordered_map,48.382000 -random,memory,0,absl::flat_hash_map,15740928 -random,memory,240967,absl::flat_hash_map,11272192 -random,memory,458751,absl::flat_hash_map,11272192 -random,memory,458752,absl::flat_hash_map,29138944 -random,memory,493930,absl::flat_hash_map,20205568 -random,memory,678845,absl::flat_hash_map,19087360 -random,memory,917503,absl::flat_hash_map,19087360 -random,memory,917504,absl::flat_hash_map,54812672 -random,memory,930488,absl::flat_hash_map,36945920 -random,memory,1835007,absl::flat_hash_map,36945920 -random,memory,1835008,absl::flat_hash_map,108392448 -random,memory,1860950,absl::flat_hash_map,72667136 -random,memory,3670015,absl::flat_hash_map,72667136 -random,memory,3670016,absl::flat_hash_map,215556096 -random,memory,3681610,absl::flat_hash_map,144109568 -random,memory,7340031,absl::flat_hash_map,144109568 -random,memory,7340032,absl::flat_hash_map,429883392 -random,memory,7348393,absl::flat_hash_map,286994432 -random,time,10000000,absl::flat_hash_map,0.917000 -random,memory,10000174,absl::flat_hash_map,286994432 -random,memory,14680063,absl::flat_hash_map,286994432 -random,memory,14680064,absl::flat_hash_map,858537984 -random,memory,14680512,absl::flat_hash_map,572764160 -random,time,20000000,absl::flat_hash_map,2.199000 -random,memory,20013320,absl::flat_hash_map,572764160 -random,memory,29360127,absl::flat_hash_map,572764160 -random,memory,29360128,absl::flat_hash_map,1715859456 -random,memory,29369210,absl::flat_hash_map,1144307712 -random,time,30000000,absl::flat_hash_map,3.818000 -random,memory,30002294,absl::flat_hash_map,1144307712 -random,time,40000000,absl::flat_hash_map,5.070000 -random,memory,40010797,absl::flat_hash_map,1144307712 -random,time,50000000,absl::flat_hash_map,6.323000 -random,memory,50009729,absl::flat_hash_map,1144307712 -random,memory,58720255,absl::flat_hash_map,1144307712 -random,memory,58720256,absl::flat_hash_map,3430486016 -random,memory,58721491,absl::flat_hash_map,2287403008 -random,time,60000000,absl::flat_hash_map,8.700000 -random,memory,60002661,absl::flat_hash_map,2287403008 -random,time,70000000,absl::flat_hash_map,10.155000 -random,memory,70010887,absl::flat_hash_map,2287403008 -random,time,80000000,absl::flat_hash_map,11.589000 -random,memory,80002681,absl::flat_hash_map,2287403008 -random,time,90000000,absl::flat_hash_map,13.020000 -random,memory,90012468,absl::flat_hash_map,2287403008 -random,time,100000000,absl::flat_hash_map,14.407000 diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/results/output_stl_flat_par b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/results/output_stl_flat_par deleted file mode 100644 index 8714815..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/results/output_stl_flat_par +++ /dev/null @@ -1,499 +0,0 @@ -random,memory,0,std::unordered_map,6688768 
-random,memory,94189,std::unordered_map,7778304 -random,memory,109769,std::unordered_map,8556544 -random,memory,122654,std::unordered_map,9261056 -random,memory,131072,std::unordered_map,13778944 -random,memory,131072,std::unordered_map,11677696 -random,memory,139313,std::unordered_map,12144640 -random,memory,153935,std::unordered_map,12943360 -random,memory,165753,std::unordered_map,13643776 -random,memory,177309,std::unordered_map,14266368 -random,memory,188501,std::unordered_map,14733312 -random,memory,198715,std::unordered_map,15282176 -random,memory,208082,std::unordered_map,15749120 -random,memory,217961,std::unordered_map,16293888 -random,memory,228367,std::unordered_map,16838656 -random,memory,248349,std::unordered_map,17854464 -random,memory,262144,std::unordered_map,26890240 -random,memory,262144,std::unordered_map,22683648 -random,memory,276980,std::unordered_map,23543808 -random,memory,296991,std::unordered_map,24477696 -random,memory,317107,std::unordered_map,25571328 -random,memory,336773,std::unordered_map,26660864 -random,memory,356852,std::unordered_map,27676672 -random,memory,374674,std::unordered_map,28532736 -random,memory,393524,std::unordered_map,29544448 -random,memory,412757,std::unordered_map,30482432 -random,memory,430634,std::unordered_map,31416320 -random,memory,456096,std::unordered_map,32743424 -random,memory,480998,std::unordered_map,34017280 -random,memory,506761,std::unordered_map,35340288 -random,memory,524288,std::unordered_map,53096448 -random,memory,524288,std::unordered_map,44687360 -random,memory,559809,std::unordered_map,46481408 -random,memory,586869,std::unordered_map,47882240 -random,memory,625664,std::unordered_map,49827840 -random,memory,661343,std::unordered_map,51781632 -random,memory,697780,std::unordered_map,53575680 -random,memory,732263,std::unordered_map,55369728 -random,memory,767346,std::unordered_map,57163776 -random,memory,801265,std::unordered_map,58880000 -random,memory,843401,std::unordered_map,61063168 -random,memory,883359,std::unordered_map,63008768 -random,memory,923703,std::unordered_map,65114112 -random,memory,962276,std::unordered_map,67145728 -random,memory,1000499,std::unordered_map,69173248 -random,memory,1046029,std::unordered_map,71438336 -random,memory,1048576,std::unordered_map,105222144 -random,memory,1048576,std::unordered_map,88412160 -random,memory,1103068,std::unordered_map,91217920 -random,memory,1166341,std::unordered_map,94412800 -random,memory,1227947,std::unordered_map,97611776 -random,memory,1288360,std::unordered_map,100732928 -random,memory,1348784,std::unordered_map,103849984 -random,memory,1415960,std::unordered_map,107282432 -random,memory,1483476,std::unordered_map,110710784 -random,memory,1551489,std::unordered_map,114221056 -random,memory,1622998,std::unordered_map,117964800 -random,memory,1698528,std::unordered_map,121786368 -random,memory,1774279,std::unordered_map,125607936 -random,memory,1852847,std::unordered_map,129658880 -random,memory,1935045,std::unordered_map,133955584 -random,memory,2016124,std::unordered_map,138084352 -random,memory,2097152,std::unordered_map,209465344 -random,memory,2097152,std::unordered_map,175845376 -random,memory,2202017,std::unordered_map,181227520 -random,memory,2308233,std::unordered_map,186687488 -random,memory,2421995,std::unordered_map,192569344 -random,memory,2540952,std::unordered_map,198647808 -random,memory,2664197,std::unordered_map,205049856 -random,memory,2786842,std::unordered_map,211288064 -random,memory,2914893,std::unordered_map,217915392 
-random,memory,3042611,std::unordered_map,224464896 -random,memory,3177918,std::unordered_map,231325696 -random,memory,3316856,std::unordered_map,238583808 -random,memory,3463219,std::unordered_map,245989376 -random,memory,3606297,std::unordered_map,253440000 -random,memory,3758685,std::unordered_map,261234688 -random,memory,3918344,std::unordered_map,269426688 -random,memory,4082497,std::unordered_map,277848064 -random,memory,4194304,std::unordered_map,418111488 -random,memory,4194304,std::unordered_map,350871552 -random,memory,4405390,std::unordered_map,361631744 -random,memory,4622462,std::unordered_map,372862976 -random,memory,4843447,std::unordered_map,384253952 -random,memory,5077057,std::unordered_map,396234752 -random,memory,5310379,std::unordered_map,408170496 -random,memory,5553459,std::unordered_map,420728832 -random,memory,5801485,std::unordered_map,433438720 -random,memory,6061642,std::unordered_map,446771200 -random,memory,6321544,std::unordered_map,460189696 -random,memory,6597657,std::unordered_map,474308608 -random,memory,6874638,std::unordered_map,488579072 -random,memory,7165061,std::unordered_map,503480320 -random,memory,7464156,std::unordered_map,518844416 -random,memory,7769321,std::unordered_map,534519808 -random,memory,8083787,std::unordered_map,550666240 -random,memory,8388608,std::unordered_map,835313664 -random,memory,8388608,std::unordered_map,700833792 -random,memory,8799364,std::unordered_map,721883136 -random,memory,9228893,std::unordered_map,743878656 -random,memory,9664620,std::unordered_map,766345216 -random,time,10000000,std::unordered_map,4.373000 -random,memory,10005035,std::unordered_map,783814656 -random,memory,10114472,std::unordered_map,789352448 -random,memory,10581340,std::unordered_map,813371392 -random,memory,11060026,std::unordered_map,837943296 -random,memory,11554760,std::unordered_map,863367168 -random,memory,12063672,std::unordered_map,889499648 -random,memory,12588048,std::unordered_map,916488192 -random,memory,13128601,std::unordered_map,944173056 -random,memory,13682065,std::unordered_map,972640256 -random,memory,14255923,std::unordered_map,1002127360 -random,memory,14847433,std::unordered_map,1032548352 -random,memory,15456818,std::unordered_map,1063825408 -random,memory,16080459,std::unordered_map,1095798784 -random,memory,16726977,std::unordered_map,1128947712 -random,memory,16777217,std::unordered_map,1669529600 -random,memory,16777217,std::unordered_map,1400569856 -random,memory,17595085,std::unordered_map,1442603008 -random,memory,18438183,std::unordered_map,1485893632 -random,memory,19311311,std::unordered_map,1530658816 -random,time,20000000,std::unordered_map,8.842000 -random,memory,20000810,std::unordered_map,1566150656 -random,memory,20207250,std::unordered_map,1576677376 -random,memory,21131235,std::unordered_map,1624178688 -random,memory,22086647,std::unordered_map,1673158656 -random,memory,23064920,std::unordered_map,1723465728 -random,memory,24073299,std::unordered_map,1775177728 -random,memory,25110516,std::unordered_map,1828532224 -random,memory,26181575,std::unordered_map,1883516928 -random,memory,27282467,std::unordered_map,1940062208 -random,memory,28422030,std::unordered_map,1998561280 -random,memory,29593778,std::unordered_map,2058694656 -random,time,30000000,std::unordered_map,12.018000 -random,memory,30000114,std::unordered_map,2079678464 -random,memory,30796939,std::unordered_map,2120552448 -random,memory,32039135,std::unordered_map,2184355840 -random,memory,33320240,std::unordered_map,2250104832 
-random,memory,33554434,std::unordered_map,3338047488 -random,memory,33554434,std::unordered_map,2800128000 -random,memory,35194605,std::unordered_map,2884362240 -random,memory,36886268,std::unordered_map,2971246592 -random,memory,38624437,std::unordered_map,3060473856 -random,time,40000000,std::unordered_map,18.924999 -random,memory,40003628,std::unordered_map,3131297792 -random,memory,40417618,std::unordered_map,3152588800 -random,memory,42264600,std::unordered_map,3247431680 -random,memory,44165274,std::unordered_map,3345006592 -random,memory,46122181,std::unordered_map,3445542912 -random,memory,48137986,std::unordered_map,3549040640 -random,time,50000000,std::unordered_map,22.261999 -random,memory,50001657,std::unordered_map,3644821504 -random,memory,50216354,std::unordered_map,3655737344 -random,memory,52356080,std::unordered_map,3765637120 -random,memory,54556920,std::unordered_map,3878731776 -random,memory,56823702,std::unordered_map,3995103232 -random,memory,59156726,std::unordered_map,4114980864 -random,time,60000000,std::unordered_map,25.919001 -random,memory,60002957,std::unordered_map,4158427136 -random,memory,61562985,std::unordered_map,4238528512 -random,memory,64040759,std::unordered_map,4365746176 -random,memory,66594964,std::unordered_map,4496855040 -random,memory,67108868,std::unordered_map,6675001344 -random,memory,67108868,std::unordered_map,5599162368 -random,time,70000000,std::unordered_map,37.734001 -random,memory,70005322,std::unordered_map,5747867648 -random,memory,70384719,std::unordered_map,5767364608 -random,memory,73758686,std::unordered_map,5940674560 -random,memory,77233925,std::unordered_map,6119124992 -random,time,80000000,std::unordered_map,41.104000 -random,memory,80002815,std::unordered_map,6261395456 -random,memory,80810186,std::unordered_map,6302810112 -random,memory,84496350,std::unordered_map,6492106752 -random,memory,88293603,std::unordered_map,6687174656 -random,time,90000000,std::unordered_map,44.681000 -random,memory,90004365,std::unordered_map,6774996992 -random,memory,92206220,std::unordered_map,6888091648 -random,memory,96235553,std::unordered_map,7095013376 -random,time,100000000,std::unordered_map,48.382000 -random,memory,0,phmap::flat_hash_map,15740928 -random,memory,240967,phmap::flat_hash_map,11272192 -random,memory,458751,phmap::flat_hash_map,11272192 -random,memory,458752,phmap::flat_hash_map,29138944 -random,memory,493930,phmap::flat_hash_map,20205568 -random,memory,678845,phmap::flat_hash_map,19087360 -random,memory,917503,phmap::flat_hash_map,19087360 -random,memory,917504,phmap::flat_hash_map,54812672 -random,memory,930488,phmap::flat_hash_map,36945920 -random,memory,1835007,phmap::flat_hash_map,36945920 -random,memory,1835008,phmap::flat_hash_map,108392448 -random,memory,1860950,phmap::flat_hash_map,72667136 -random,memory,3670015,phmap::flat_hash_map,72667136 -random,memory,3670016,phmap::flat_hash_map,215556096 -random,memory,3681610,phmap::flat_hash_map,144109568 -random,memory,7340031,phmap::flat_hash_map,144109568 -random,memory,7340032,phmap::flat_hash_map,429883392 -random,memory,7348393,phmap::flat_hash_map,286994432 -random,time,10000000,phmap::flat_hash_map,0.917000 -random,memory,10000174,phmap::flat_hash_map,286994432 -random,memory,14680063,phmap::flat_hash_map,286994432 -random,memory,14680064,phmap::flat_hash_map,858537984 -random,memory,14680512,phmap::flat_hash_map,572764160 -random,time,20000000,phmap::flat_hash_map,2.199000 -random,memory,20013320,phmap::flat_hash_map,572764160 
-random,memory,29360127,phmap::flat_hash_map,572764160 -random,memory,29360128,phmap::flat_hash_map,1715859456 -random,memory,29369210,phmap::flat_hash_map,1144307712 -random,time,30000000,phmap::flat_hash_map,3.818000 -random,memory,30002294,phmap::flat_hash_map,1144307712 -random,time,40000000,phmap::flat_hash_map,5.070000 -random,memory,40010797,phmap::flat_hash_map,1144307712 -random,time,50000000,phmap::flat_hash_map,6.323000 -random,memory,50009729,phmap::flat_hash_map,1144307712 -random,memory,58720255,phmap::flat_hash_map,1144307712 -random,memory,58720256,phmap::flat_hash_map,3430486016 -random,memory,58721491,phmap::flat_hash_map,2287403008 -random,time,60000000,phmap::flat_hash_map,8.700000 -random,memory,60002661,phmap::flat_hash_map,2287403008 -random,time,70000000,phmap::flat_hash_map,10.155000 -random,memory,70010887,phmap::flat_hash_map,2287403008 -random,time,80000000,phmap::flat_hash_map,11.589000 -random,memory,80002681,phmap::flat_hash_map,2287403008 -random,time,90000000,phmap::flat_hash_map,13.020000 -random,memory,90012468,phmap::flat_hash_map,2287403008 -random,time,100000000,phmap::flat_hash_map,14.407000 -random,memory,0,phmap::parallel_flat_hash_map,6688768 -random,memory,228584,phmap::parallel_flat_hash_map,6688768 -random,memory,228585,phmap::parallel_flat_hash_map,8372224 -random,memory,230683,phmap::parallel_flat_hash_map,8372224 -random,memory,230684,phmap::parallel_flat_hash_map,11190272 -random,memory,257042,phmap::parallel_flat_hash_map,11190272 -random,memory,257105,phmap::parallel_flat_hash_map,11751424 -random,memory,456008,phmap::parallel_flat_hash_map,11751424 -random,memory,456009,phmap::parallel_flat_hash_map,13996032 -random,memory,456819,phmap::parallel_flat_hash_map,13582336 -random,memory,458740,phmap::parallel_flat_hash_map,13582336 -random,memory,458741,phmap::parallel_flat_hash_map,15847424 -random,memory,459266,phmap::parallel_flat_hash_map,15847424 -random,memory,459267,phmap::parallel_flat_hash_map,17518592 -random,memory,465267,phmap::parallel_flat_hash_map,17518592 -random,memory,465268,phmap::parallel_flat_hash_map,21155840 -random,memory,494937,phmap::parallel_flat_hash_map,20598784 -random,memory,911236,phmap::parallel_flat_hash_map,20598784 -random,memory,911237,phmap::parallel_flat_hash_map,22835200 -random,memory,913145,phmap::parallel_flat_hash_map,22835200 -random,memory,913146,phmap::parallel_flat_hash_map,25067520 -random,memory,914338,phmap::parallel_flat_hash_map,25067520 -random,memory,914339,phmap::parallel_flat_hash_map,27303936 -random,memory,916480,phmap::parallel_flat_hash_map,27303936 -random,memory,916481,phmap::parallel_flat_hash_map,29536256 -random,memory,916523,phmap::parallel_flat_hash_map,29536256 -random,memory,916524,phmap::parallel_flat_hash_map,30650368 -random,memory,917790,phmap::parallel_flat_hash_map,30650368 -random,memory,917791,phmap::parallel_flat_hash_map,32882688 -random,memory,918474,phmap::parallel_flat_hash_map,32882688 -random,memory,918475,phmap::parallel_flat_hash_map,35115008 -random,memory,924420,phmap::parallel_flat_hash_map,35115008 -random,memory,924421,phmap::parallel_flat_hash_map,37355520 -random,memory,937279,phmap::parallel_flat_hash_map,37355520 -random,memory,937319,phmap::parallel_flat_hash_map,38465536 -random,memory,1826595,phmap::parallel_flat_hash_map,38465536 -random,memory,1826596,phmap::parallel_flat_hash_map,42934272 -random,memory,1827102,phmap::parallel_flat_hash_map,42934272 -random,memory,1827103,phmap::parallel_flat_hash_map,45170688 
-random,memory,1828265,phmap::parallel_flat_hash_map,45170688 -random,memory,1828266,phmap::parallel_flat_hash_map,47398912 -random,memory,1831166,phmap::parallel_flat_hash_map,47398912 -random,memory,1831167,phmap::parallel_flat_hash_map,49631232 -random,memory,1831515,phmap::parallel_flat_hash_map,49631232 -random,memory,1831516,phmap::parallel_flat_hash_map,51867648 -random,memory,1833264,phmap::parallel_flat_hash_map,51867648 -random,memory,1833265,phmap::parallel_flat_hash_map,54099968 -random,memory,1833345,phmap::parallel_flat_hash_map,54099968 -random,memory,1833346,phmap::parallel_flat_hash_map,56332288 -random,memory,1835078,phmap::parallel_flat_hash_map,56332288 -random,memory,1835079,phmap::parallel_flat_hash_map,58572800 -random,memory,1836213,phmap::parallel_flat_hash_map,58572800 -random,memory,1836214,phmap::parallel_flat_hash_map,60801024 -random,memory,1836364,phmap::parallel_flat_hash_map,60801024 -random,memory,1836365,phmap::parallel_flat_hash_map,63033344 -random,memory,1836849,phmap::parallel_flat_hash_map,63033344 -random,memory,1836850,phmap::parallel_flat_hash_map,65265664 -random,memory,1838065,phmap::parallel_flat_hash_map,65265664 -random,memory,1838066,phmap::parallel_flat_hash_map,67502080 -random,memory,1839241,phmap::parallel_flat_hash_map,67502080 -random,memory,1839242,phmap::parallel_flat_hash_map,69734400 -random,memory,1839771,phmap::parallel_flat_hash_map,69734400 -random,memory,1839772,phmap::parallel_flat_hash_map,71962624 -random,memory,1844031,phmap::parallel_flat_hash_map,71962624 -random,memory,1844032,phmap::parallel_flat_hash_map,74194944 -random,memory,1844165,phmap::parallel_flat_hash_map,74194944 -random,memory,1844166,phmap::parallel_flat_hash_map,76423168 -random,memory,1849529,phmap::parallel_flat_hash_map,74186752 -random,memory,3656347,phmap::parallel_flat_hash_map,74186752 -random,memory,3656348,phmap::parallel_flat_hash_map,83120128 -random,memory,3658236,phmap::parallel_flat_hash_map,83120128 -random,memory,3658237,phmap::parallel_flat_hash_map,87592960 -random,memory,3660832,phmap::parallel_flat_hash_map,87592960 -random,memory,3660833,phmap::parallel_flat_hash_map,92061696 -random,memory,3663897,phmap::parallel_flat_hash_map,92061696 -random,memory,3663898,phmap::parallel_flat_hash_map,96526336 -random,memory,3668004,phmap::parallel_flat_hash_map,96526336 -random,memory,3668005,phmap::parallel_flat_hash_map,100990976 -random,memory,3668781,phmap::parallel_flat_hash_map,100990976 -random,memory,3668782,phmap::parallel_flat_hash_map,105459712 -random,memory,3669316,phmap::parallel_flat_hash_map,100990976 -random,memory,3669736,phmap::parallel_flat_hash_map,100990976 -random,memory,3669737,phmap::parallel_flat_hash_map,109928448 -random,memory,3670780,phmap::parallel_flat_hash_map,109928448 -random,memory,3670781,phmap::parallel_flat_hash_map,114388992 -random,memory,3671735,phmap::parallel_flat_hash_map,114388992 -random,memory,3671736,phmap::parallel_flat_hash_map,118853632 -random,memory,3672742,phmap::parallel_flat_hash_map,118853632 -random,memory,3672743,phmap::parallel_flat_hash_map,123322368 -random,memory,3672841,phmap::parallel_flat_hash_map,123322368 -random,memory,3672842,phmap::parallel_flat_hash_map,127787008 -random,memory,3674661,phmap::parallel_flat_hash_map,127787008 -random,memory,3674662,phmap::parallel_flat_hash_map,132259840 -random,memory,3675918,phmap::parallel_flat_hash_map,132259840 -random,memory,3675919,phmap::parallel_flat_hash_map,136724480 -random,memory,3676502,phmap::parallel_flat_hash_map,136724480 
-random,memory,3676503,phmap::parallel_flat_hash_map,141180928 -random,memory,3676556,phmap::parallel_flat_hash_map,141180928 -random,memory,3676557,phmap::parallel_flat_hash_map,145645568 -random,memory,3679943,phmap::parallel_flat_hash_map,141176832 -random,memory,3682847,phmap::parallel_flat_hash_map,141176832 -random,memory,3682848,phmap::parallel_flat_hash_map,150106112 -random,memory,3701420,phmap::parallel_flat_hash_map,145637376 -random,memory,7323550,phmap::parallel_flat_hash_map,145637376 -random,memory,7323551,phmap::parallel_flat_hash_map,163500032 -random,memory,7325955,phmap::parallel_flat_hash_map,154566656 -random,memory,7326781,phmap::parallel_flat_hash_map,154566656 -random,memory,7326782,phmap::parallel_flat_hash_map,172433408 -random,memory,7327471,phmap::parallel_flat_hash_map,172433408 -random,memory,7327472,phmap::parallel_flat_hash_map,181362688 -random,memory,7328548,phmap::parallel_flat_hash_map,181362688 -random,memory,7328549,phmap::parallel_flat_hash_map,190300160 -random,memory,7331571,phmap::parallel_flat_hash_map,190300160 -random,memory,7331572,phmap::parallel_flat_hash_map,199229440 -random,memory,7333270,phmap::parallel_flat_hash_map,199229440 -random,memory,7333271,phmap::parallel_flat_hash_map,208154624 -random,memory,7336330,phmap::parallel_flat_hash_map,208154624 -random,memory,7336331,phmap::parallel_flat_hash_map,217083904 -random,memory,7338941,phmap::parallel_flat_hash_map,217083904 -random,memory,7338942,phmap::parallel_flat_hash_map,226021376 -random,memory,7339987,phmap::parallel_flat_hash_map,226021376 -random,memory,7339988,phmap::parallel_flat_hash_map,234950656 -random,memory,7340192,phmap::parallel_flat_hash_map,234950656 -random,memory,7340193,phmap::parallel_flat_hash_map,243879936 -random,memory,7340212,phmap::parallel_flat_hash_map,243879936 -random,memory,7340213,phmap::parallel_flat_hash_map,252805120 -random,memory,7340756,phmap::parallel_flat_hash_map,252805120 -random,memory,7340757,phmap::parallel_flat_hash_map,261734400 -random,memory,7353138,phmap::parallel_flat_hash_map,261734400 -random,memory,7353139,phmap::parallel_flat_hash_map,270659584 -random,memory,7355638,phmap::parallel_flat_hash_map,270659584 -random,memory,7355639,phmap::parallel_flat_hash_map,279592960 -random,memory,7358552,phmap::parallel_flat_hash_map,279592960 -random,memory,7358553,phmap::parallel_flat_hash_map,288522240 -random,memory,7363002,phmap::parallel_flat_hash_map,279584768 -random,memory,7364175,phmap::parallel_flat_hash_map,279584768 -random,memory,7364176,phmap::parallel_flat_hash_map,297451520 -random,memory,7379232,phmap::parallel_flat_hash_map,288518144 -random,time,10000000,phmap::parallel_flat_hash_map,1.115000 -random,memory,10001837,phmap::parallel_flat_hash_map,288518144 -random,memory,14641981,phmap::parallel_flat_hash_map,288518144 -random,memory,14641982,phmap::parallel_flat_hash_map,324243456 -random,memory,14649536,phmap::parallel_flat_hash_map,306380800 -random,memory,14658094,phmap::parallel_flat_hash_map,306380800 -random,memory,14658095,phmap::parallel_flat_hash_map,342106112 -random,memory,14663598,phmap::parallel_flat_hash_map,342106112 -random,memory,14663599,phmap::parallel_flat_hash_map,359968768 -random,memory,14667323,phmap::parallel_flat_hash_map,359968768 -random,memory,14667324,phmap::parallel_flat_hash_map,377835520 -random,memory,14668425,phmap::parallel_flat_hash_map,377835520 -random,memory,14668426,phmap::parallel_flat_hash_map,395694080 -random,memory,14669145,phmap::parallel_flat_hash_map,395694080 
-random,memory,14669146,phmap::parallel_flat_hash_map,413560832 -random,memory,14669916,phmap::parallel_flat_hash_map,395698176 -random,memory,14673506,phmap::parallel_flat_hash_map,395698176 -random,memory,14673507,phmap::parallel_flat_hash_map,431423488 -random,memory,14676201,phmap::parallel_flat_hash_map,431423488 -random,memory,14676202,phmap::parallel_flat_hash_map,449286144 -random,memory,14681323,phmap::parallel_flat_hash_map,449286144 -random,memory,14681324,phmap::parallel_flat_hash_map,467152896 -random,memory,14684771,phmap::parallel_flat_hash_map,449286144 -random,memory,14686498,phmap::parallel_flat_hash_map,449286144 -random,memory,14686499,phmap::parallel_flat_hash_map,485011456 -random,memory,14691299,phmap::parallel_flat_hash_map,485011456 -random,memory,14691300,phmap::parallel_flat_hash_map,502865920 -random,memory,14694531,phmap::parallel_flat_hash_map,502865920 -random,memory,14694532,phmap::parallel_flat_hash_map,520728576 -random,memory,14696324,phmap::parallel_flat_hash_map,520728576 -random,memory,14696325,phmap::parallel_flat_hash_map,538587136 -random,memory,14701255,phmap::parallel_flat_hash_map,538587136 -random,memory,14701256,phmap::parallel_flat_hash_map,556445696 -random,memory,14702651,phmap::parallel_flat_hash_map,538578944 -random,memory,14704639,phmap::parallel_flat_hash_map,538578944 -random,memory,14704640,phmap::parallel_flat_hash_map,574300160 -random,memory,14707748,phmap::parallel_flat_hash_map,574300160 -random,memory,14707749,phmap::parallel_flat_hash_map,592158720 -random,memory,14711651,phmap::parallel_flat_hash_map,574296064 -random,time,20000000,phmap::parallel_flat_hash_map,2.484000 -random,memory,20006564,phmap::parallel_flat_hash_map,574296064 -random,memory,29321443,phmap::parallel_flat_hash_map,574296064 -random,memory,29321444,phmap::parallel_flat_hash_map,645742592 -random,memory,29322576,phmap::parallel_flat_hash_map,645742592 -random,memory,29322577,phmap::parallel_flat_hash_map,681463808 -random,memory,29331685,phmap::parallel_flat_hash_map,645742592 -random,memory,29333141,phmap::parallel_flat_hash_map,645742592 -random,memory,29333142,phmap::parallel_flat_hash_map,717189120 -random,memory,29336447,phmap::parallel_flat_hash_map,717189120 -random,memory,29336448,phmap::parallel_flat_hash_map,752910336 -random,memory,29345055,phmap::parallel_flat_hash_map,717185024 -random,memory,29352413,phmap::parallel_flat_hash_map,717185024 -random,memory,29352414,phmap::parallel_flat_hash_map,788627456 -random,memory,29357051,phmap::parallel_flat_hash_map,752902144 -random,memory,29357141,phmap::parallel_flat_hash_map,752902144 -random,memory,29357142,phmap::parallel_flat_hash_map,824348672 -random,memory,29359481,phmap::parallel_flat_hash_map,824348672 -random,memory,29359482,phmap::parallel_flat_hash_map,860069888 -random,memory,29362077,phmap::parallel_flat_hash_map,860069888 -random,memory,29362078,phmap::parallel_flat_hash_map,895795200 -random,memory,29366120,phmap::parallel_flat_hash_map,895795200 -random,memory,29366121,phmap::parallel_flat_hash_map,931512320 -random,memory,29368098,phmap::parallel_flat_hash_map,931512320 -random,memory,29368099,phmap::parallel_flat_hash_map,967229440 -random,memory,29368978,phmap::parallel_flat_hash_map,931508224 -random,memory,29373953,phmap::parallel_flat_hash_map,931508224 -random,memory,29373954,phmap::parallel_flat_hash_map,1002954752 -random,memory,29376083,phmap::parallel_flat_hash_map,1002954752 -random,memory,29376084,phmap::parallel_flat_hash_map,1038680064 
-random,memory,29378406,phmap::parallel_flat_hash_map,1002950656 -random,memory,29378970,phmap::parallel_flat_hash_map,1002950656 -random,memory,29378971,phmap::parallel_flat_hash_map,1074397184 -random,memory,29380083,phmap::parallel_flat_hash_map,1074397184 -random,memory,29380084,phmap::parallel_flat_hash_map,1110122496 -random,memory,29381250,phmap::parallel_flat_hash_map,1110122496 -random,memory,29381251,phmap::parallel_flat_hash_map,1145839616 -random,memory,29384576,phmap::parallel_flat_hash_map,1110110208 -random,memory,29391514,phmap::parallel_flat_hash_map,1110110208 -random,memory,29391515,phmap::parallel_flat_hash_map,1181556736 -random,memory,29398827,phmap::parallel_flat_hash_map,1145835520 -random,time,30000000,phmap::parallel_flat_hash_map,4.197000 -random,memory,30003726,phmap::parallel_flat_hash_map,1145835520 -random,time,40000000,phmap::parallel_flat_hash_map,5.407000 -random,memory,40004260,phmap::parallel_flat_hash_map,1145835520 -random,time,50000000,phmap::parallel_flat_hash_map,6.651000 -random,memory,50008463,phmap::parallel_flat_hash_map,1145835520 -random,memory,58650774,phmap::parallel_flat_hash_map,1145835520 -random,memory,58650775,phmap::parallel_flat_hash_map,1288724480 -random,memory,58651341,phmap::parallel_flat_hash_map,1217282048 -random,memory,58670748,phmap::parallel_flat_hash_map,1217282048 -random,memory,58670749,phmap::parallel_flat_hash_map,1360171008 -random,memory,58672543,phmap::parallel_flat_hash_map,1288724480 -random,memory,58679343,phmap::parallel_flat_hash_map,1288724480 -random,memory,58679344,phmap::parallel_flat_hash_map,1431609344 -random,memory,58679621,phmap::parallel_flat_hash_map,1431609344 -random,memory,58679622,phmap::parallel_flat_hash_map,1503047680 -random,memory,58681449,phmap::parallel_flat_hash_map,1503047680 -random,memory,58681450,phmap::parallel_flat_hash_map,1574498304 -random,memory,58687442,phmap::parallel_flat_hash_map,1503055872 -random,memory,58716081,phmap::parallel_flat_hash_map,1503055872 -random,memory,58716082,phmap::parallel_flat_hash_map,1645944832 -random,memory,58719988,phmap::parallel_flat_hash_map,1574498304 -random,memory,58720496,phmap::parallel_flat_hash_map,1574498304 -random,memory,58720497,phmap::parallel_flat_hash_map,1717387264 -random,memory,58723911,phmap::parallel_flat_hash_map,1717387264 -random,memory,58723912,phmap::parallel_flat_hash_map,1788841984 -random,memory,58728095,phmap::parallel_flat_hash_map,1717395456 -random,memory,58731608,phmap::parallel_flat_hash_map,1717395456 -random,memory,58731609,phmap::parallel_flat_hash_map,1860280320 -random,memory,58737338,phmap::parallel_flat_hash_map,1860280320 -random,memory,58737339,phmap::parallel_flat_hash_map,1931718656 -random,memory,58737397,phmap::parallel_flat_hash_map,1860272128 -random,memory,58744726,phmap::parallel_flat_hash_map,1860272128 -random,memory,58744727,phmap::parallel_flat_hash_map,2003165184 -random,memory,58746205,phmap::parallel_flat_hash_map,1931718656 -random,memory,58752523,phmap::parallel_flat_hash_map,1931718656 -random,memory,58752524,phmap::parallel_flat_hash_map,2074607616 -random,memory,58753838,phmap::parallel_flat_hash_map,2074607616 -random,memory,58753839,phmap::parallel_flat_hash_map,2146054144 -random,memory,58756083,phmap::parallel_flat_hash_map,2146054144 -random,memory,58756084,phmap::parallel_flat_hash_map,2217496576 -random,memory,58758266,phmap::parallel_flat_hash_map,2217496576 -random,memory,58758267,phmap::parallel_flat_hash_map,2288943104 
-random,memory,58760494,phmap::parallel_flat_hash_map,2217492480 -random,memory,58766191,phmap::parallel_flat_hash_map,2217492480 -random,memory,58766192,phmap::parallel_flat_hash_map,2360381440 -random,memory,58773720,phmap::parallel_flat_hash_map,2288934912 -random,time,60000000,phmap::parallel_flat_hash_map,9.067000 -random,memory,60002586,phmap::parallel_flat_hash_map,2288934912 -random,time,70000000,phmap::parallel_flat_hash_map,10.409000 -random,memory,70011900,phmap::parallel_flat_hash_map,2288934912 -random,time,80000000,phmap::parallel_flat_hash_map,11.747000 -random,memory,80002981,phmap::parallel_flat_hash_map,2288934912 -random,time,90000000,phmap::parallel_flat_hash_map,13.091000 -random,memory,90007030,phmap::parallel_flat_hash_map,2288934912 -random,time,100000000,phmap::parallel_flat_hash_map,14.467000 diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/results/output_various_N b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/results/output_various_N deleted file mode 100644 index 352a033..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/benchmark/results/output_various_N +++ /dev/null @@ -1,838 +0,0 @@ -random,memory,0,phmap::flat_hash_map,15736832 -random,memory,262696,phmap::flat_hash_map,15736832 -random,memory,262879,phmap::flat_hash_map,11268096 -random,memory,458751,phmap::flat_hash_map,11268096 -random,memory,458752,phmap::flat_hash_map,29134848 -random,memory,493071,phmap::flat_hash_map,29134848 -random,memory,493277,phmap::flat_hash_map,20201472 -random,memory,680181,phmap::flat_hash_map,20201472 -random,memory,680399,phmap::flat_hash_map,19083264 -random,memory,917503,phmap::flat_hash_map,19083264 -random,memory,917504,phmap::flat_hash_map,54808576 -random,memory,927756,phmap::flat_hash_map,54808576 -random,memory,927844,phmap::flat_hash_map,36941824 -random,memory,1835007,phmap::flat_hash_map,36941824 -random,memory,1835008,phmap::flat_hash_map,108388352 -random,memory,1840514,phmap::flat_hash_map,108388352 -random,memory,1840562,phmap::flat_hash_map,72663040 -random,memory,3670015,phmap::flat_hash_map,72663040 -random,memory,3670016,phmap::flat_hash_map,215552000 -random,memory,3672773,phmap::flat_hash_map,215552000 -random,memory,3672818,phmap::flat_hash_map,144105472 -random,memory,7340031,phmap::flat_hash_map,144105472 -random,memory,7340032,phmap::flat_hash_map,429887488 -random,memory,7341507,phmap::flat_hash_map,429887488 -random,memory,7341551,phmap::flat_hash_map,286994432 -random,time,10000000,phmap::flat_hash_map,0.959000 -random,memory,10013489,phmap::flat_hash_map,286994432 -random,memory,14680063,phmap::flat_hash_map,286994432 -random,memory,14680064,phmap::flat_hash_map,858537984 -random,memory,14689336,phmap::flat_hash_map,858537984 -random,memory,14689395,phmap::flat_hash_map,572764160 -random,time,20000000,phmap::flat_hash_map,2.235000 -random,memory,20014360,phmap::flat_hash_map,572764160 -random,memory,29360127,phmap::flat_hash_map,572764160 -random,memory,29360128,phmap::flat_hash_map,1715855360 -random,memory,29362598,phmap::flat_hash_map,1715855360 -random,memory,29362641,phmap::flat_hash_map,1144303616 -random,time,30000000,phmap::flat_hash_map,3.893000 -random,memory,30002217,phmap::flat_hash_map,1144303616 -random,time,40000000,phmap::flat_hash_map,5.194000 -random,memory,40013295,phmap::flat_hash_map,1144303616 -random,time,50000000,phmap::flat_hash_map,6.478000 -random,memory,50004165,phmap::flat_hash_map,1144303616 -random,memory,58720255,phmap::flat_hash_map,1144303616 
-random,memory,58720256,phmap::flat_hash_map,3430486016 -random,memory,58730855,phmap::flat_hash_map,3430486016 -random,memory,58730895,phmap::flat_hash_map,2287407104 -random,time,60000000,phmap::flat_hash_map,8.861000 -random,memory,60005803,phmap::flat_hash_map,2287407104 -random,time,70000000,phmap::flat_hash_map,10.361000 -random,memory,70008717,phmap::flat_hash_map,2287407104 -random,time,80000000,phmap::flat_hash_map,11.848000 -random,memory,80003058,phmap::flat_hash_map,2287407104 -random,time,90000000,phmap::flat_hash_map,13.318000 -random,memory,90012411,phmap::flat_hash_map,2287407104 -random,time,100000000,phmap::flat_hash_map,14.773000 -random,memory,0,phmap::parallel_flat_hash_map_4,8425472 -random,memory,230139,phmap::parallel_flat_hash_map_4,8503296 -random,memory,230140,phmap::parallel_flat_hash_map_4,10977280 -random,memory,253089,phmap::parallel_flat_hash_map_4,10977280 -random,memory,253158,phmap::parallel_flat_hash_map_4,13225984 -random,memory,454288,phmap::parallel_flat_hash_map_4,13225984 -random,memory,454289,phmap::parallel_flat_hash_map_4,14352384 -random,memory,456498,phmap::parallel_flat_hash_map_4,14352384 -random,memory,456499,phmap::parallel_flat_hash_map_4,14082048 -random,memory,457735,phmap::parallel_flat_hash_map_4,14082048 -random,memory,457736,phmap::parallel_flat_hash_map_4,15777792 -random,memory,459898,phmap::parallel_flat_hash_map_4,15777792 -random,memory,459899,phmap::parallel_flat_hash_map_4,18591744 -random,memory,461397,phmap::parallel_flat_hash_map_4,18591744 -random,memory,461398,phmap::parallel_flat_hash_map_4,20283392 -random,memory,471716,phmap::parallel_flat_hash_map_4,20283392 -random,memory,471760,phmap::parallel_flat_hash_map_4,21397504 -random,memory,911802,phmap::parallel_flat_hash_map_4,21397504 -random,memory,911803,phmap::parallel_flat_hash_map_4,23633920 -random,memory,911912,phmap::parallel_flat_hash_map_4,23633920 -random,memory,911963,phmap::parallel_flat_hash_map_4,22515712 -random,memory,912599,phmap::parallel_flat_hash_map_4,22515712 -random,memory,912600,phmap::parallel_flat_hash_map_4,25866240 -random,memory,915388,phmap::parallel_flat_hash_map_4,25866240 -random,memory,915389,phmap::parallel_flat_hash_map_4,28098560 -random,memory,916642,phmap::parallel_flat_hash_map_4,28098560 -random,memory,916643,phmap::parallel_flat_hash_map_4,30339072 -random,memory,917177,phmap::parallel_flat_hash_map_4,30339072 -random,memory,917178,phmap::parallel_flat_hash_map_4,32575488 -random,memory,917415,phmap::parallel_flat_hash_map_4,32575488 -random,memory,917416,phmap::parallel_flat_hash_map_4,33689600 -random,memory,920064,phmap::parallel_flat_hash_map_4,33689600 -random,memory,920065,phmap::parallel_flat_hash_map_4,35917824 -random,memory,921982,phmap::parallel_flat_hash_map_4,35917824 -random,memory,921983,phmap::parallel_flat_hash_map_4,38154240 -random,memory,922325,phmap::parallel_flat_hash_map_4,39272448 -random,memory,1826091,phmap::parallel_flat_hash_map_4,39268352 -random,memory,1826092,phmap::parallel_flat_hash_map_4,43737088 -random,memory,1827754,phmap::parallel_flat_hash_map_4,43737088 -random,memory,1827755,phmap::parallel_flat_hash_map_4,45973504 -random,memory,1830054,phmap::parallel_flat_hash_map_4,45973504 -random,memory,1830055,phmap::parallel_flat_hash_map_4,48209920 -random,memory,1830521,phmap::parallel_flat_hash_map_4,48209920 -random,memory,1830522,phmap::parallel_flat_hash_map_4,50438144 -random,memory,1831966,phmap::parallel_flat_hash_map_4,50438144 
-random,memory,1831967,phmap::parallel_flat_hash_map_4,52666368 -random,memory,1832841,phmap::parallel_flat_hash_map_4,52666368 -random,memory,1832842,phmap::parallel_flat_hash_map_4,59375616 -random,memory,1836262,phmap::parallel_flat_hash_map_4,59367424 -random,memory,1836263,phmap::parallel_flat_hash_map_4,61603840 -random,memory,1836728,phmap::parallel_flat_hash_map_4,61603840 -random,memory,1836729,phmap::parallel_flat_hash_map_4,63836160 -random,memory,1836910,phmap::parallel_flat_hash_map_4,63836160 -random,memory,1836911,phmap::parallel_flat_hash_map_4,66064384 -random,memory,1838008,phmap::parallel_flat_hash_map_4,66064384 -random,memory,1838009,phmap::parallel_flat_hash_map_4,68300800 -random,memory,1839009,phmap::parallel_flat_hash_map_4,68300800 -random,memory,1839010,phmap::parallel_flat_hash_map_4,70529024 -random,memory,1842043,phmap::parallel_flat_hash_map_4,70529024 -random,memory,1842044,phmap::parallel_flat_hash_map_4,72761344 -random,memory,1842263,phmap::parallel_flat_hash_map_4,72761344 -random,memory,1842264,phmap::parallel_flat_hash_map_4,74993664 -random,memory,1843365,phmap::parallel_flat_hash_map_4,77230080 -random,memory,1902147,phmap::parallel_flat_hash_map_4,77230080 -random,memory,1902233,phmap::parallel_flat_hash_map_4,74997760 -random,memory,3656950,phmap::parallel_flat_hash_map_4,74997760 -random,memory,3656951,phmap::parallel_flat_hash_map_4,83935232 -random,memory,3658625,phmap::parallel_flat_hash_map_4,83939328 -random,memory,3658626,phmap::parallel_flat_hash_map_4,88403968 -random,memory,3660741,phmap::parallel_flat_hash_map_4,88403968 -random,memory,3660742,phmap::parallel_flat_hash_map_4,92872704 -random,memory,3663376,phmap::parallel_flat_hash_map_4,92872704 -random,memory,3663377,phmap::parallel_flat_hash_map_4,97341440 -random,memory,3665288,phmap::parallel_flat_hash_map_4,97341440 -random,memory,3665289,phmap::parallel_flat_hash_map_4,101801984 -random,memory,3668214,phmap::parallel_flat_hash_map_4,101801984 -random,memory,3668215,phmap::parallel_flat_hash_map_4,106270720 -random,memory,3669632,phmap::parallel_flat_hash_map_4,106270720 -random,memory,3669633,phmap::parallel_flat_hash_map_4,110731264 -random,memory,3670422,phmap::parallel_flat_hash_map_4,110731264 -random,memory,3670423,phmap::parallel_flat_hash_map_4,115195904 -random,memory,3671980,phmap::parallel_flat_hash_map_4,115195904 -random,memory,3671981,phmap::parallel_flat_hash_map_4,119656448 -random,memory,3673456,phmap::parallel_flat_hash_map_4,119656448 -random,memory,3673457,phmap::parallel_flat_hash_map_4,124116992 -random,memory,3675504,phmap::parallel_flat_hash_map_4,124116992 -random,memory,3675548,phmap::parallel_flat_hash_map_4,119644160 -random,memory,3675608,phmap::parallel_flat_hash_map_4,119644160 -random,memory,3675609,phmap::parallel_flat_hash_map_4,137519104 -random,memory,3676812,phmap::parallel_flat_hash_map_4,137515008 -random,memory,3676813,phmap::parallel_flat_hash_map_4,141983744 -random,memory,3677010,phmap::parallel_flat_hash_map_4,141983744 -random,memory,3677011,phmap::parallel_flat_hash_map_4,146444288 -random,memory,3682438,phmap::parallel_flat_hash_map_4,146444288 -random,memory,3682439,phmap::parallel_flat_hash_map_4,150904832 -random,memory,3705594,phmap::parallel_flat_hash_map_4,150904832 -random,memory,3705595,phmap::parallel_flat_hash_map_4,146440192 -random,memory,7320177,phmap::parallel_flat_hash_map_4,146440192 -random,memory,7320178,phmap::parallel_flat_hash_map_4,164302848 -random,memory,7321389,phmap::parallel_flat_hash_map_4,164302848 
-random,memory,7321390,phmap::parallel_flat_hash_map_4,173240320 -random,memory,7332480,phmap::parallel_flat_hash_map_4,173240320 -random,memory,7332481,phmap::parallel_flat_hash_map_4,182169600 -random,memory,7334320,phmap::parallel_flat_hash_map_4,182169600 -random,memory,7334321,phmap::parallel_flat_hash_map_4,191094784 -random,memory,7338310,phmap::parallel_flat_hash_map_4,191094784 -random,memory,7338311,phmap::parallel_flat_hash_map_4,200028160 -random,memory,7338542,phmap::parallel_flat_hash_map_4,200028160 -random,memory,7338543,phmap::parallel_flat_hash_map_4,208961536 -random,memory,7339365,phmap::parallel_flat_hash_map_4,208961536 -random,memory,7339366,phmap::parallel_flat_hash_map_4,217899008 -random,memory,7339930,phmap::parallel_flat_hash_map_4,217899008 -random,memory,7339931,phmap::parallel_flat_hash_map_4,226828288 -random,memory,7341026,phmap::parallel_flat_hash_map_4,226828288 -random,memory,7341027,phmap::parallel_flat_hash_map_4,253620224 -random,memory,7345230,phmap::parallel_flat_hash_map_4,253616128 -random,memory,7345231,phmap::parallel_flat_hash_map_4,262545408 -random,memory,7347839,phmap::parallel_flat_hash_map_4,262545408 -random,memory,7347840,phmap::parallel_flat_hash_map_4,271470592 -random,memory,7349398,phmap::parallel_flat_hash_map_4,271470592 -random,memory,7349399,phmap::parallel_flat_hash_map_4,280399872 -random,memory,7350072,phmap::parallel_flat_hash_map_4,280399872 -random,memory,7350073,phmap::parallel_flat_hash_map_4,289329152 -random,memory,7358606,phmap::parallel_flat_hash_map_4,289329152 -random,memory,7358607,phmap::parallel_flat_hash_map_4,298262528 -random,memory,7418699,phmap::parallel_flat_hash_map_4,298262528 -random,memory,7418807,phmap::parallel_flat_hash_map_4,289329152 -random,time,10000000,phmap::parallel_flat_hash_map_4,0.775000 -random,memory,10143429,phmap::parallel_flat_hash_map_4,288841728 -random,memory,14656465,phmap::parallel_flat_hash_map_4,288841728 -random,memory,14656466,phmap::parallel_flat_hash_map_4,324567040 -random,memory,14666142,phmap::parallel_flat_hash_map_4,324567040 -random,memory,14666153,phmap::parallel_flat_hash_map_4,342429696 -random,memory,14668185,phmap::parallel_flat_hash_map_4,342429696 -random,memory,14668186,phmap::parallel_flat_hash_map_4,360292352 -random,memory,14669296,phmap::parallel_flat_hash_map_4,360292352 -random,memory,14669297,phmap::parallel_flat_hash_map_4,378155008 -random,memory,14669466,phmap::parallel_flat_hash_map_4,378155008 -random,memory,14669467,phmap::parallel_flat_hash_map_4,396017664 -random,memory,14673507,phmap::parallel_flat_hash_map_4,396017664 -random,memory,14673508,phmap::parallel_flat_hash_map_4,413876224 -random,memory,14677314,phmap::parallel_flat_hash_map_4,413876224 -random,memory,14677315,phmap::parallel_flat_hash_map_4,467460096 -random,memory,14677330,phmap::parallel_flat_hash_map_4,467460096 -random,memory,14677331,phmap::parallel_flat_hash_map_4,449593344 -random,memory,14681511,phmap::parallel_flat_hash_map_4,449593344 -random,memory,14681512,phmap::parallel_flat_hash_map_4,467456000 -random,memory,14681884,phmap::parallel_flat_hash_map_4,467456000 -random,memory,14681885,phmap::parallel_flat_hash_map_4,485318656 -random,memory,14685984,phmap::parallel_flat_hash_map_4,485318656 -random,memory,14685985,phmap::parallel_flat_hash_map_4,503173120 -random,memory,14692178,phmap::parallel_flat_hash_map_4,503173120 -random,memory,14692179,phmap::parallel_flat_hash_map_4,521035776 -random,memory,14693317,phmap::parallel_flat_hash_map_4,521035776 
[… per-sample "random,memory" rows elided; phmap::parallel_flat_hash_map_4 grows stepwise to a peak of ≈2.36 GB …]
-random,time,20000000,phmap::parallel_flat_hash_map_4,1.624000
-random,time,30000000,phmap::parallel_flat_hash_map_4,2.765000
-random,time,40000000,phmap::parallel_flat_hash_map_4,3.366000
-random,time,50000000,phmap::parallel_flat_hash_map_4,3.980000
-random,time,60000000,phmap::parallel_flat_hash_map_4,5.747000
-random,time,70000000,phmap::parallel_flat_hash_map_4,6.369000
-random,time,80000000,phmap::parallel_flat_hash_map_4,7.042000
-random,time,90000000,phmap::parallel_flat_hash_map_4,7.670000
-random,time,100000000,phmap::parallel_flat_hash_map_4,8.369000
[… per-sample "random,memory" rows elided; phmap::parallel_flat_hash_map_5 peaks at ≈2.32 GB …]
-random,time,10000000,phmap::parallel_flat_hash_map_5,0.706000
-random,time,20000000,phmap::parallel_flat_hash_map_5,1.394000
-random,time,30000000,phmap::parallel_flat_hash_map_5,2.388000
-random,time,40000000,phmap::parallel_flat_hash_map_5,2.859000
-random,time,50000000,phmap::parallel_flat_hash_map_5,3.398000
-random,time,60000000,phmap::parallel_flat_hash_map_5,4.975000
-random,time,70000000,phmap::parallel_flat_hash_map_5,5.534000
-random,time,80000000,phmap::parallel_flat_hash_map_5,6.041000
-random,time,90000000,phmap::parallel_flat_hash_map_5,6.613000
-random,time,100000000,phmap::parallel_flat_hash_map_5,7.142000
[… per-sample "random,memory" rows elided; phmap::parallel_flat_hash_map_6 peaks at ≈2.29 GB …]
-random,time,10000000,phmap::parallel_flat_hash_map_6,0.625000
-random,time,20000000,phmap::parallel_flat_hash_map_6,1.270000
-random,time,30000000,phmap::parallel_flat_hash_map_6,2.204000
-random,time,40000000,phmap::parallel_flat_hash_map_6,2.689000
-random,time,50000000,phmap::parallel_flat_hash_map_6,3.140000
-random,time,60000000,phmap::parallel_flat_hash_map_6,4.560000
-random,time,70000000,phmap::parallel_flat_hash_map_6,5.082000
-random,time,80000000,phmap::parallel_flat_hash_map_6,5.576000
-random,time,90000000,phmap::parallel_flat_hash_map_6,6.062000
-random,time,100000000,phmap::parallel_flat_hash_map_6,6.613000
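Each row above follows the same five-column layout: benchmark name, measurement kind (`time` or `memory`), insert count (or allocation sample point), container name, and value (seconds for `time`, bytes for `memory`). A minimal Python sketch for summarizing such a dump, assuming it has been saved to a hypothetical `output.csv`:

```python
import csv
from collections import defaultdict

# Rows: benchmark, kind, n, container, value.  "time" rows give seconds
# elapsed after n inserts; "memory" rows sample resident bytes.
times = defaultdict(dict)
peak = defaultdict(int)
with open("output.csv") as f:  # filename is an assumption
    for _benchmark, kind, n, container, value in csv.reader(f):
        if kind == "time":
            times[container][int(n)] = float(value)
        elif kind == "memory":
            peak[container] = max(peak[container], int(value))

for container in sorted(times):
    line = ", ".join(f"{n // 10**6}M: {s:.2f}s" for n, s in sorted(times[container].items()))
    print(f"{container} (peak {peak[container] / 1e9:.2f} GB): {line}")
```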
diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/cmake/CMakeLists.txt.in b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/cmake/CMakeLists.txt.in
deleted file mode 100644
index 3e01ff5..0000000
--- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/cmake/CMakeLists.txt.in
+++ /dev/null
@@ -1,15 +0,0 @@
-cmake_minimum_required(VERSION 3.8)
-
-project(googletest-download NONE)
-
-include(ExternalProject)
-ExternalProject_Add(googletest
-  GIT_REPOSITORY https://github.com/google/googletest.git
-  GIT_TAG        main
-  SOURCE_DIR     "${CMAKE_BINARY_DIR}/googletest-src"
-  BINARY_DIR     "${CMAKE_BINARY_DIR}/googletest-build"
-  CONFIGURE_COMMAND ""
-  BUILD_COMMAND     ""
-  INSTALL_COMMAND   ""
-  TEST_COMMAND      ""
-)
\ No newline at end of file
diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/cmake/DetectVersion.cmake b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/cmake/DetectVersion.cmake
deleted file mode 100644
index 4bffa5e..0000000
--- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/cmake/DetectVersion.cmake
+++ /dev/null
@@ -1,8 +0,0 @@
-
-file(READ "${CMAKE_CURRENT_SOURCE_DIR}/parallel_hashmap/phmap_config.h" _PHMAP_H_CONTENTS)
-string(REGEX REPLACE ".*#define PHMAP_VERSION_MAJOR ([0-9]+).*" "\\1" DETECTED_PHMAP_VERSION_MAJOR "${_PHMAP_H_CONTENTS}")
-string(REGEX REPLACE ".*#define PHMAP_VERSION_MINOR ([0-9]+).*" "\\1" DETECTED_PHMAP_VERSION_MINOR "${_PHMAP_H_CONTENTS}")
-string(REGEX REPLACE ".*#define PHMAP_VERSION_PATCH ([0-9]+).*" "\\1" DETECTED_PHMAP_VERSION_PATCH "${_PHMAP_H_CONTENTS}")
-set(DETECTED_PHMAP_VERSION "${DETECTED_PHMAP_VERSION_MAJOR}.${DETECTED_PHMAP_VERSION_MINOR}.${DETECTED_PHMAP_VERSION_PATCH}")
-
-message(STATUS "Detected PHMAP Version - ${DETECTED_PHMAP_VERSION}")
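DetectVersion.cmake simply greps the version macros out of `phmap_config.h` and joins them into a dotted version string. For illustration, the same extraction as a short Python sketch (run from a parallel-hashmap checkout; the logic mirrors the regexes above):

```python
import re
from pathlib import Path

# Mirror DetectVersion.cmake: read phmap_config.h and pull out
# PHMAP_VERSION_MAJOR / MINOR / PATCH, then join them with dots.
header = Path("parallel_hashmap/phmap_config.h").read_text()

def part(name: str) -> str:
    m = re.search(rf"#define PHMAP_VERSION_{name} ([0-9]+)", header)
    if m is None:
        raise ValueError(f"PHMAP_VERSION_{name} not found")
    return m.group(1)

version = ".".join(part(p) for p in ("MAJOR", "MINOR", "PATCH"))
print(f"Detected PHMAP Version - {version}")
```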
diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/cmake/DownloadGTest.cmake b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/cmake/DownloadGTest.cmake
deleted file mode 100644
index 055270f..0000000
--- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/cmake/DownloadGTest.cmake
+++ /dev/null
@@ -1,34 +0,0 @@
-# Downloads and unpacks googletest at configure time. Based on the instructions
-# at https://github.com/google/googletest/tree/master/googletest#incorporating-into-an-existing-cmake-project
-
-# Download the latest googletest from GitHub
-configure_file(
-    ${CMAKE_CURRENT_LIST_DIR}/CMakeLists.txt.in
-    ${CMAKE_BINARY_DIR}/googletest-download/CMakeLists.txt
-)
-
-# Configure and build the downloaded googletest source
-execute_process(COMMAND ${CMAKE_COMMAND} -G "${CMAKE_GENERATOR}" .
-    RESULT_VARIABLE result
-    WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/googletest-download)
-
-if(result)
-    message(FATAL_ERROR "CMake step for googletest failed: ${result}")
-endif()
-
-execute_process(COMMAND ${CMAKE_COMMAND} --build .
-    RESULT_VARIABLE result
-    WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/googletest-download)
-
-if(result)
-    message(FATAL_ERROR "Build step for googletest failed: ${result}")
-endif()
-
-# Prevent overriding the parent project's compiler/linker settings on Windows
-set(gtest_force_shared_crt ON CACHE BOOL "" FORCE)
-
-# Add googletest directly to our build. This defines the gtest and gtest_main
-# targets.
-add_subdirectory(${CMAKE_BINARY_DIR}/googletest-src
-                 ${CMAKE_BINARY_DIR}/googletest-build
-                 EXCLUDE_FROM_ALL)
diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/cmake/helpers.cmake b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/cmake/helpers.cmake
deleted file mode 100644
index b290066..0000000
--- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/cmake/helpers.cmake
+++ /dev/null
@@ -1,60 +0,0 @@
-#set_property(GLOBAL PROPERTY USE_FOLDERS ON)
-set(PHMAP_IDE_FOLDER phmap)
-
-# -------------------------------------------------------------
-# phmap_cc_test(NAME awesome_test
-#               SRCS "awesome_test.cc"
-#               DEPS phmap::awesome gmock gtest_main)
-# -------------------------------------------------------------
-function(phmap_cc_test)
-  cmake_parse_arguments(PHMAP_CC_TEST
-    ""
-    "NAME"
-    "SRCS;COPTS;CWOPTS;CLOPTS;DEFINES;LINKOPTS;DEPS"
-    ${ARGN}
-  )
-
-  set(_NAME "test_${PHMAP_CC_TEST_NAME}")
-  add_executable(${_NAME} "")
-  target_sources(${_NAME} PRIVATE ${PHMAP_CC_TEST_SRCS})
-  target_include_directories(${_NAME}
-    PUBLIC ${PHMAP_COMMON_INCLUDE_DIRS}
-    PRIVATE ${GMOCK_INCLUDE_DIRS} ${GTEST_INCLUDE_DIRS}
-  )
-  target_compile_definitions(${_NAME}
-    PUBLIC ${PHMAP_CC_TEST_DEFINES}
-  )
-  if(MSVC)
-    target_compile_options(${_NAME}
-      PRIVATE ${PHMAP_CC_TEST_CWOPTS} /W4 /Zc:__cplusplus /std:c++latest
-    )
-  else()
-    target_compile_options(${_NAME}
-      PRIVATE ${PHMAP_CC_TEST_CLOPTS}
-    )
-  endif()
-  target_compile_options(${_NAME}
-    PRIVATE ${PHMAP_CC_TEST_COPTS}
-  )
-  target_link_libraries(${_NAME}
-    PUBLIC ${PHMAP_CC_TEST_DEPS}
-    PRIVATE ${PHMAP_CC_TEST_LINKOPTS}
-  )
-  # Add all phmap test targets to a folder in the IDE for organization.
-  set_property(TARGET ${_NAME} PROPERTY FOLDER ${PHMAP_IDE_FOLDER}/test)
-
-  set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD ${PHMAP_CXX_STANDARD})
-  set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD_REQUIRED ON)
-
-  add_test(NAME ${_NAME} COMMAND ${_NAME})
-endfunction()
-
-# -------------------------------------------------------------
-function(check_target my_target)
-  if(NOT TARGET ${my_target})
-    message(FATAL_ERROR " PHMAP: compiling phmap tests requires a ${my_target} CMake target in your project,
-                         see CMake/README.md for more details")
-  endif(NOT TARGET ${my_target})
-endfunction()
diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/cmake/phmap.cmake b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/cmake/phmap.cmake
deleted file mode 100644
index 931bef1..0000000
--- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/cmake/phmap.cmake
+++ /dev/null
@@ -1,42 +0,0 @@
-# ---------------------------------------------------------------------------
-# Copyright (c) 2019, Gregory Popovitch - greg7mdp@gmail.com
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Includes work from abseil-cpp (https://github.com/abseil/abseil-cpp)
-# with modifications.
-#
-# Copyright 2017 The Abseil Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ---------------------------------------------------------------------------
-
-
-include(CMakeParseArguments)
-
-function(check_target my_target)
-  if(NOT TARGET ${my_target})
-    message(FATAL_ERROR " ABSL: compiling absl requires a ${my_target} CMake target in your project,
-                         see CMake/README.md for more details")
-  endif(NOT TARGET ${my_target})
-endfunction()
diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/css/bootstrap-responsive.min.css b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/css/bootstrap-responsive.min.css
deleted file mode 100644
index ab59da3..0000000
--- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/css/bootstrap-responsive.min.css
+++ /dev/null
@@ -1,9 +0,0 @@
-/*!
- * Bootstrap Responsive v2.1.0
- *
- * Copyright 2012 Twitter, Inc
- * Licensed under the Apache License v2.0
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Designed and built with all the love in the world @twitter by @mdo and @fat.
- */
[… single-line minified Bootstrap Responsive v2.1.0 stylesheet elided …]
[class*="span"]{display:block;float:left;width:100%;min-height:30px;margin-left:2.564102564102564%;*margin-left:2.5109110747408616%;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.row-fluid [class*="span"]:first-child{margin-left:0}.row-fluid .span12{width:100%;*width:99.94680851063829%}.row-fluid .span11{width:91.45299145299145%;*width:91.39979996362975%}.row-fluid .span10{width:82.90598290598291%;*width:82.8527914166212%}.row-fluid .span9{width:74.35897435897436%;*width:74.30578286961266%}.row-fluid .span8{width:65.81196581196582%;*width:65.75877432260411%}.row-fluid .span7{width:57.26495726495726%;*width:57.21176577559556%}.row-fluid .span6{width:48.717948717948715%;*width:48.664757228587014%}.row-fluid .span5{width:40.17094017094017%;*width:40.11774868157847%}.row-fluid .span4{width:31.623931623931625%;*width:31.570740134569924%}.row-fluid .span3{width:23.076923076923077%;*width:23.023731587561375%}.row-fluid .span2{width:14.52991452991453%;*width:14.476723040552828%}.row-fluid .span1{width:5.982905982905983%;*width:5.929714493544281%}.row-fluid .offset12{margin-left:105.12820512820512%;*margin-left:105.02182214948171%}.row-fluid .offset12:first-child{margin-left:102.56410256410257%;*margin-left:102.45771958537915%}.row-fluid .offset11{margin-left:96.58119658119658%;*margin-left:96.47481360247316%}.row-fluid .offset11:first-child{margin-left:94.01709401709402%;*margin-left:93.91071103837061%}.row-fluid .offset10{margin-left:88.03418803418803%;*margin-left:87.92780505546462%}.row-fluid .offset10:first-child{margin-left:85.47008547008548%;*margin-left:85.36370249136206%}.row-fluid .offset9{margin-left:79.48717948717949%;*margin-left:79.38079650845607%}.row-fluid .offset9:first-child{margin-left:76.92307692307693%;*margin-left:76.81669394435352%}.row-fluid .offset8{margin-left:70.94017094017094%;*margin-left:70.83378796144753%}.row-fluid .offset8:first-child{margin-left:68.37606837606839%;*margin-left:68.26968539734497%}.row-fluid .offset7{margin-left:62.393162393162385%;*margin-left:62.28677941443899%}.row-fluid .offset7:first-child{margin-left:59.82905982905982%;*margin-left:59.72267685033642%}.row-fluid .offset6{margin-left:53.84615384615384%;*margin-left:53.739770867430444%}.row-fluid .offset6:first-child{margin-left:51.28205128205128%;*margin-left:51.175668303327875%}.row-fluid .offset5{margin-left:45.299145299145295%;*margin-left:45.1927623204219%}.row-fluid .offset5:first-child{margin-left:42.73504273504273%;*margin-left:42.62865975631933%}.row-fluid .offset4{margin-left:36.75213675213675%;*margin-left:36.645753773413354%}.row-fluid .offset4:first-child{margin-left:34.18803418803419%;*margin-left:34.081651209310785%}.row-fluid .offset3{margin-left:28.205128205128204%;*margin-left:28.0987452264048%}.row-fluid .offset3:first-child{margin-left:25.641025641025642%;*margin-left:25.53464266230224%}.row-fluid .offset2{margin-left:19.65811965811966%;*margin-left:19.551736679396257%}.row-fluid .offset2:first-child{margin-left:17.094017094017094%;*margin-left:16.98763411529369%}.row-fluid .offset1{margin-left:11.11111111111111%;*margin-left:11.004728132387708%}.row-fluid .offset1:first-child{margin-left:8.547008547008547%;*margin-left:8.440625568285142%}input,textarea,.uneditable-input{margin-left:0}.controls-row 
[class*="span"]+[class*="span"]{margin-left:30px}input.span12,textarea.span12,.uneditable-input.span12{width:1156px}input.span11,textarea.span11,.uneditable-input.span11{width:1056px}input.span10,textarea.span10,.uneditable-input.span10{width:956px}input.span9,textarea.span9,.uneditable-input.span9{width:856px}input.span8,textarea.span8,.uneditable-input.span8{width:756px}input.span7,textarea.span7,.uneditable-input.span7{width:656px}input.span6,textarea.span6,.uneditable-input.span6{width:556px}input.span5,textarea.span5,.uneditable-input.span5{width:456px}input.span4,textarea.span4,.uneditable-input.span4{width:356px}input.span3,textarea.span3,.uneditable-input.span3{width:256px}input.span2,textarea.span2,.uneditable-input.span2{width:156px}input.span1,textarea.span1,.uneditable-input.span1{width:56px}.thumbnails{margin-left:-30px}.thumbnails>li{margin-left:30px}.row-fluid .thumbnails{margin-left:0}}@media(min-width:768px) and (max-width:979px){.row{margin-left:-20px;*zoom:1}.row:before,.row:after{display:table;line-height:0;content:""}.row:after{clear:both}[class*="span"]{float:left;margin-left:20px}.container,.navbar-static-top .container,.navbar-fixed-top .container,.navbar-fixed-bottom .container{width:724px}.span12{width:724px}.span11{width:662px}.span10{width:600px}.span9{width:538px}.span8{width:476px}.span7{width:414px}.span6{width:352px}.span5{width:290px}.span4{width:228px}.span3{width:166px}.span2{width:104px}.span1{width:42px}.offset12{margin-left:764px}.offset11{margin-left:702px}.offset10{margin-left:640px}.offset9{margin-left:578px}.offset8{margin-left:516px}.offset7{margin-left:454px}.offset6{margin-left:392px}.offset5{margin-left:330px}.offset4{margin-left:268px}.offset3{margin-left:206px}.offset2{margin-left:144px}.offset1{margin-left:82px}.row-fluid{width:100%;*zoom:1}.row-fluid:before,.row-fluid:after{display:table;line-height:0;content:""}.row-fluid:after{clear:both}.row-fluid [class*="span"]{display:block;float:left;width:100%;min-height:30px;margin-left:2.7624309392265194%;*margin-left:2.709239449864817%;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.row-fluid [class*="span"]:first-child{margin-left:0}.row-fluid .span12{width:100%;*width:99.94680851063829%}.row-fluid .span11{width:91.43646408839778%;*width:91.38327259903608%}.row-fluid .span10{width:82.87292817679558%;*width:82.81973668743387%}.row-fluid .span9{width:74.30939226519337%;*width:74.25620077583166%}.row-fluid .span8{width:65.74585635359117%;*width:65.69266486422946%}.row-fluid .span7{width:57.18232044198895%;*width:57.12912895262725%}.row-fluid .span6{width:48.61878453038674%;*width:48.56559304102504%}.row-fluid .span5{width:40.05524861878453%;*width:40.00205712942283%}.row-fluid .span4{width:31.491712707182323%;*width:31.43852121782062%}.row-fluid .span3{width:22.92817679558011%;*width:22.87498530621841%}.row-fluid .span2{width:14.3646408839779%;*width:14.311449394616199%}.row-fluid .span1{width:5.801104972375691%;*width:5.747913483013988%}.row-fluid .offset12{margin-left:105.52486187845304%;*margin-left:105.41847889972962%}.row-fluid .offset12:first-child{margin-left:102.76243093922652%;*margin-left:102.6560479605031%}.row-fluid .offset11{margin-left:96.96132596685082%;*margin-left:96.8549429881274%}.row-fluid .offset11:first-child{margin-left:94.1988950276243%;*margin-left:94.09251204890089%}.row-fluid .offset10{margin-left:88.39779005524862%;*margin-left:88.2914070765252%}.row-fluid 
.offset10:first-child{margin-left:85.6353591160221%;*margin-left:85.52897613729868%}.row-fluid .offset9{margin-left:79.8342541436464%;*margin-left:79.72787116492299%}.row-fluid .offset9:first-child{margin-left:77.07182320441989%;*margin-left:76.96544022569647%}.row-fluid .offset8{margin-left:71.2707182320442%;*margin-left:71.16433525332079%}.row-fluid .offset8:first-child{margin-left:68.50828729281768%;*margin-left:68.40190431409427%}.row-fluid .offset7{margin-left:62.70718232044199%;*margin-left:62.600799341718584%}.row-fluid .offset7:first-child{margin-left:59.94475138121547%;*margin-left:59.838368402492065%}.row-fluid .offset6{margin-left:54.14364640883978%;*margin-left:54.037263430116376%}.row-fluid .offset6:first-child{margin-left:51.38121546961326%;*margin-left:51.27483249088986%}.row-fluid .offset5{margin-left:45.58011049723757%;*margin-left:45.47372751851417%}.row-fluid .offset5:first-child{margin-left:42.81767955801105%;*margin-left:42.71129657928765%}.row-fluid .offset4{margin-left:37.01657458563536%;*margin-left:36.91019160691196%}.row-fluid .offset4:first-child{margin-left:34.25414364640884%;*margin-left:34.14776066768544%}.row-fluid .offset3{margin-left:28.45303867403315%;*margin-left:28.346655695309746%}.row-fluid .offset3:first-child{margin-left:25.69060773480663%;*margin-left:25.584224756083227%}.row-fluid .offset2{margin-left:19.88950276243094%;*margin-left:19.783119783707537%}.row-fluid .offset2:first-child{margin-left:17.12707182320442%;*margin-left:17.02068884448102%}.row-fluid .offset1{margin-left:11.32596685082873%;*margin-left:11.219583872105325%}.row-fluid .offset1:first-child{margin-left:8.56353591160221%;*margin-left:8.457152932878806%}input,textarea,.uneditable-input{margin-left:0}.controls-row [class*="span"]+[class*="span"]{margin-left:20px}input.span12,textarea.span12,.uneditable-input.span12{width:710px}input.span11,textarea.span11,.uneditable-input.span11{width:648px}input.span10,textarea.span10,.uneditable-input.span10{width:586px}input.span9,textarea.span9,.uneditable-input.span9{width:524px}input.span8,textarea.span8,.uneditable-input.span8{width:462px}input.span7,textarea.span7,.uneditable-input.span7{width:400px}input.span6,textarea.span6,.uneditable-input.span6{width:338px}input.span5,textarea.span5,.uneditable-input.span5{width:276px}input.span4,textarea.span4,.uneditable-input.span4{width:214px}input.span3,textarea.span3,.uneditable-input.span3{width:152px}input.span2,textarea.span2,.uneditable-input.span2{width:90px}input.span1,textarea.span1,.uneditable-input.span1{width:28px}}@media(max-width:767px){body{padding-right:20px;padding-left:20px}.navbar-fixed-top,.navbar-fixed-bottom{margin-right:-20px;margin-left:-20px}.container-fluid{padding:0}.dl-horizontal dt{float:none;width:auto;clear:none;text-align:left}.dl-horizontal dd{margin-left:0}.container{width:auto}.row-fluid{width:100%}.row,.thumbnails{margin-left:0}.thumbnails>li{float:none;margin-left:0}[class*="span"],.row-fluid [class*="span"]{display:block;float:none;width:auto;margin-left:0}.span12,.row-fluid .span12{width:100%;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.input-large,.input-xlarge,.input-xxlarge,input[class*="span"],select[class*="span"],textarea[class*="span"],.uneditable-input{display:block;width:100%;min-height:30px;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.input-prepend input,.input-append input,.input-prepend input[class*="span"],.input-append 
input[class*="span"]{display:inline-block;width:auto}.modal{position:fixed;top:20px;right:20px;left:20px;width:auto;margin:0}.modal.fade.in{top:auto}}@media(max-width:480px){.nav-collapse{-webkit-transform:translate3d(0,0,0)}.page-header h1 small{display:block;line-height:20px}input[type="checkbox"],input[type="radio"]{border:1px solid #ccc}.form-horizontal .control-group>label{float:none;width:auto;padding-top:0;text-align:left}.form-horizontal .controls{margin-left:0}.form-horizontal .control-list{padding-top:0}.form-horizontal .form-actions{padding-right:10px;padding-left:10px}.modal{top:10px;right:10px;left:10px}.modal-header .close{padding:10px;margin:-10px}.carousel-caption{position:static}}@media(max-width:979px){body{padding-top:0}.navbar-fixed-top,.navbar-fixed-bottom{position:static}.navbar-fixed-top{margin-bottom:20px}.navbar-fixed-bottom{margin-top:20px}.navbar-fixed-top .navbar-inner,.navbar-fixed-bottom .navbar-inner{padding:5px}.navbar .container{width:auto;padding:0}.navbar .brand{padding-right:10px;padding-left:10px;margin:0 0 0 -5px}.nav-collapse{clear:both}.nav-collapse .nav{float:none;margin:0 0 10px}.nav-collapse .nav>li{float:none}.nav-collapse .nav>li>a{margin-bottom:2px}.nav-collapse .nav>.divider-vertical{display:none}.nav-collapse .nav .nav-header{color:#555;text-shadow:none}.nav-collapse .nav>li>a,.nav-collapse .dropdown-menu a{padding:9px 15px;font-weight:bold;color:#555;-webkit-border-radius:3px;-moz-border-radius:3px;border-radius:3px}.nav-collapse .btn{padding:4px 10px 4px;font-weight:normal;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}.nav-collapse .dropdown-menu li+li a{margin-bottom:2px}.nav-collapse .nav>li>a:hover,.nav-collapse .dropdown-menu a:hover{background-color:#f2f2f2}.navbar-inverse .nav-collapse .nav>li>a:hover,.navbar-inverse .nav-collapse .dropdown-menu a:hover{background-color:#111}.nav-collapse.in .btn-group{padding:0;margin-top:5px}.nav-collapse .dropdown-menu{position:static;top:auto;left:auto;display:block;float:none;max-width:none;padding:0;margin:0 15px;background-color:transparent;border:0;-webkit-border-radius:0;-moz-border-radius:0;border-radius:0;-webkit-box-shadow:none;-moz-box-shadow:none;box-shadow:none}.nav-collapse .dropdown-menu:before,.nav-collapse .dropdown-menu:after{display:none}.nav-collapse .dropdown-menu .divider{display:none}.nav-collapse .navbar-form,.nav-collapse .navbar-search{float:none;padding:10px 15px;margin:10px 0;border-top:1px solid #f2f2f2;border-bottom:1px solid #f2f2f2;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,0.1),0 1px 0 rgba(255,255,255,0.1);-moz-box-shadow:inset 0 1px 0 rgba(255,255,255,0.1),0 1px 0 rgba(255,255,255,0.1);box-shadow:inset 0 1px 0 rgba(255,255,255,0.1),0 1px 0 rgba(255,255,255,0.1)}.navbar .nav-collapse .nav.pull-right{float:none;margin-left:0}.nav-collapse,.nav-collapse.collapse{height:0;overflow:hidden}.navbar .btn-navbar{display:block}.navbar-static .navbar-inner{padding-right:10px;padding-left:10px}}@media(min-width:980px){.nav-collapse.collapse{height:auto!important;overflow:visible!important}} diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/css/bootstrap.min.css b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/css/bootstrap.min.css deleted file mode 100644 index 4a4440c..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/css/bootstrap.min.css +++ /dev/null @@ -1,10 +0,0 @@ -/*! 
- * Bootstrap v2.1.0
- *
- * Copyright 2012 Twitter, Inc
- * Licensed under the Apache License v2.0
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Designed and built with all the love in the world @twitter by @mdo and @fat.
- * Augmented by Eric Kryski @ekryski
- */ [minified Bootstrap v2.1.0 rules elided]
[class*="span"]{display:block;float:left;width:100%;min-height:30px;margin-left:2.127659574468085%;*margin-left:2.074468085106383%;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.row-fluid [class*="span"]:first-child{margin-left:0}.row-fluid .span12{width:100%;*width:99.94680851063829%}.row-fluid .span11{width:91.48936170212765%;*width:91.43617021276594%}.row-fluid .span10{width:82.97872340425532%;*width:82.92553191489361%}.row-fluid .span9{width:74.46808510638297%;*width:74.41489361702126%}.row-fluid .span8{width:65.95744680851064%;*width:65.90425531914893%}.row-fluid .span7{width:57.44680851063829%;*width:57.39361702127659%}.row-fluid .span6{width:48.93617021276595%;*width:48.88297872340425%}.row-fluid .span5{width:40.42553191489362%;*width:40.37234042553192%}.row-fluid .span4{width:31.914893617021278%;*width:31.861702127659576%}.row-fluid .span3{width:23.404255319148934%;*width:23.351063829787233%}.row-fluid .span2{width:14.893617021276595%;*width:14.840425531914894%}.row-fluid .span1{width:6.382978723404255%;*width:6.329787234042553%}.row-fluid .offset12{margin-left:104.25531914893617%;*margin-left:104.14893617021275%}.row-fluid .offset12:first-child{margin-left:102.12765957446808%;*margin-left:102.02127659574467%}.row-fluid .offset11{margin-left:95.74468085106382%;*margin-left:95.6382978723404%}.row-fluid .offset11:first-child{margin-left:93.61702127659574%;*margin-left:93.51063829787232%}.row-fluid .offset10{margin-left:87.23404255319149%;*margin-left:87.12765957446807%}.row-fluid .offset10:first-child{margin-left:85.1063829787234%;*margin-left:84.99999999999999%}.row-fluid .offset9{margin-left:78.72340425531914%;*margin-left:78.61702127659572%}.row-fluid .offset9:first-child{margin-left:76.59574468085106%;*margin-left:76.48936170212764%}.row-fluid .offset8{margin-left:70.2127659574468%;*margin-left:70.10638297872339%}.row-fluid .offset8:first-child{margin-left:68.08510638297872%;*margin-left:67.9787234042553%}.row-fluid .offset7{margin-left:61.70212765957446%;*margin-left:61.59574468085106%}.row-fluid .offset7:first-child{margin-left:59.574468085106375%;*margin-left:59.46808510638297%}.row-fluid .offset6{margin-left:53.191489361702125%;*margin-left:53.085106382978715%}.row-fluid .offset6:first-child{margin-left:51.063829787234035%;*margin-left:50.95744680851063%}.row-fluid .offset5{margin-left:44.68085106382979%;*margin-left:44.57446808510638%}.row-fluid .offset5:first-child{margin-left:42.5531914893617%;*margin-left:42.4468085106383%}.row-fluid .offset4{margin-left:36.170212765957444%;*margin-left:36.06382978723405%}.row-fluid .offset4:first-child{margin-left:34.04255319148936%;*margin-left:33.93617021276596%}.row-fluid .offset3{margin-left:27.659574468085104%;*margin-left:27.5531914893617%}.row-fluid .offset3:first-child{margin-left:25.53191489361702%;*margin-left:25.425531914893618%}.row-fluid .offset2{margin-left:19.148936170212764%;*margin-left:19.04255319148936%}.row-fluid .offset2:first-child{margin-left:17.02127659574468%;*margin-left:16.914893617021278%}.row-fluid .offset1{margin-left:10.638297872340425%;*margin-left:10.53191489361702%}.row-fluid .offset1:first-child{margin-left:8.51063829787234%;*margin-left:8.404255319148938%}[class*="span"].hide,.row-fluid [class*="span"].hide{display:none}[class*="span"].pull-right,.row-fluid 
[class*="span"].pull-right{float:right}.container{margin-right:auto;margin-left:auto;*zoom:1}.container:before,.container:after{display:table;line-height:0;content:""}.container:after{clear:both}.container-fluid{padding-right:20px;padding-left:20px;*zoom:1}.container-fluid:before,.container-fluid:after{display:table;line-height:0;content:""}.container-fluid:after{clear:both}p{margin:0 0 10px}.lead{margin-bottom:20px;font-size:20px;font-weight:200;line-height:30px}small{font-size:85%}strong{font-weight:bold}em{font-style:italic}cite{font-style:normal}.muted{color:#999}h1,h2,h3,h4,h5,h6{margin:10px 0;font-family:inherit;font-weight:bold;line-height:1;color:inherit;text-rendering:optimizelegibility}h1 small,h2 small,h3 small,h4 small,h5 small,h6 small{font-weight:normal;line-height:1;color:#999}h1{font-size:36px;line-height:40px}h2{font-size:30px;line-height:40px}h3{font-size:24px;line-height:40px}h4{font-size:18px;line-height:20px}h5{font-size:14px;line-height:20px}h6{font-size:12px;line-height:20px}h1 small{font-size:24px}h2 small{font-size:18px}h3 small{font-size:14px}h4 small{font-size:14px}.page-header{padding-bottom:9px;margin:20px 0 30px;border-bottom:1px solid #eee}ul,ol{padding:0;margin:0 0 10px 25px}ul ul,ul ol,ol ol,ol ul{margin-bottom:0}li{line-height:20px}ul.unstyled,ol.unstyled{margin-left:0;list-style:none}dl{margin-bottom:20px}dt,dd{line-height:20px}dt{font-weight:bold}dd{margin-left:10px}.dl-horizontal dt{float:left;width:120px;overflow:hidden;clear:left;text-align:right;text-overflow:ellipsis;white-space:nowrap}.dl-horizontal dd{margin-left:130px}hr{margin:20px 0;border:0;border-top:1px solid #eee;border-bottom:1px solid #fff}abbr[title]{cursor:help;border-bottom:1px dotted #999}abbr.initialism{font-size:90%;text-transform:uppercase}blockquote{padding:0 0 0 15px;margin:0 0 20px;border-left:5px solid #eee}blockquote p{margin-bottom:0;font-size:16px;font-weight:300;line-height:25px}blockquote small{display:block;line-height:20px;color:#999}blockquote small:before{content:'\2014 \00A0'}blockquote.pull-right{float:right;padding-right:15px;padding-left:0;border-right:5px solid #eee;border-left:0}blockquote.pull-right p,blockquote.pull-right small{text-align:right}blockquote.pull-right small:before{content:''}blockquote.pull-right small:after{content:'\00A0 \2014'}q:before,q:after,blockquote:before,blockquote:after{content:""}address{display:block;margin-bottom:20px;font-style:normal;line-height:20px}code,pre{padding:0 3px 2px;font-family:Monaco,Menlo,Consolas,"Courier New",monospace;font-size:12px;color:#333;-webkit-border-radius:3px;-moz-border-radius:3px;border-radius:3px}code{padding:2px 4px;color:#d14;background-color:#f7f7f9;border:1px solid #e1e1e8}pre{display:block;padding:9.5px;margin:0 0 10px;font-size:13px;line-height:20px;word-break:break-all;word-wrap:break-word;white-space:pre;white-space:pre-wrap;background-color:#f5f5f5;border:1px solid #ccc;border:1px solid rgba(0,0,0,0.15);-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}pre.prettyprint{margin-bottom:20px}pre code{padding:0;color:inherit;background-color:transparent;border:0}.pre-scrollable{max-height:340px;overflow-y:scroll}form{margin:0 0 20px}fieldset{padding:0;margin:0;border:0}legend{display:block;width:100%;padding:0;margin-bottom:20px;font-size:21px;line-height:40px;color:#333;border:0;border-bottom:1px solid #e5e5e5}legend 
small{font-size:15px;color:#999}label,input,button,select,textarea{font-size:14px;font-weight:normal;line-height:20px}input,button,select,textarea{font-family:"Helvetica Neue",Helvetica,Arial,sans-serif}label{display:block;margin-bottom:5px}select,textarea,input[type="text"],input[type="password"],input[type="datetime"],input[type="datetime-local"],input[type="date"],input[type="month"],input[type="time"],input[type="week"],input[type="number"],input[type="email"],input[type="url"],input[type="search"],input[type="tel"],input[type="color"],.uneditable-input{display:inline-block;height:20px;padding:4px 6px;margin-bottom:9px;font-size:14px;line-height:20px;color:#555;-webkit-border-radius:3px;-moz-border-radius:3px;border-radius:3px}input,textarea{width:210px}textarea{height:auto}textarea,input[type="text"],input[type="password"],input[type="datetime"],input[type="datetime-local"],input[type="date"],input[type="month"],input[type="time"],input[type="week"],input[type="number"],input[type="email"],input[type="url"],input[type="search"],input[type="tel"],input[type="color"],.uneditable-input{background-color:#fff;border:1px solid #ccc;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);-webkit-transition:border linear .2s,box-shadow linear .2s;-moz-transition:border linear .2s,box-shadow linear .2s;-o-transition:border linear .2s,box-shadow linear .2s;transition:border linear .2s,box-shadow linear .2s}textarea:focus,input[type="text"]:focus,input[type="password"]:focus,input[type="datetime"]:focus,input[type="datetime-local"]:focus,input[type="date"]:focus,input[type="month"]:focus,input[type="time"]:focus,input[type="week"]:focus,input[type="number"]:focus,input[type="email"]:focus,input[type="url"]:focus,input[type="search"]:focus,input[type="tel"]:focus,input[type="color"]:focus,.uneditable-input:focus{border-color:rgba(82,168,236,0.8);outline:0;outline:thin dotted \9;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 8px rgba(82,168,236,0.6);-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 8px rgba(82,168,236,0.6);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 8px rgba(82,168,236,0.6)}input[type="radio"],input[type="checkbox"]{margin:4px 0 0;margin-top:1px \9;*margin-top:0;line-height:normal;cursor:pointer}input[type="file"],input[type="image"],input[type="submit"],input[type="reset"],input[type="button"],input[type="radio"],input[type="checkbox"]{width:auto}select,input[type="file"]{height:30px;*margin-top:4px;line-height:30px}select{width:220px;background-color:#fff;border:1px solid #bbb}select[multiple],select[size]{height:auto}select:focus,input[type="file"]:focus,input[type="radio"]:focus,input[type="checkbox"]:focus{outline:thin dotted #333;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}.uneditable-input,.uneditable-textarea{color:#999;cursor:not-allowed;background-color:#fcfcfc;border-color:#ccc;-webkit-box-shadow:inset 0 1px 2px rgba(0,0,0,0.025);-moz-box-shadow:inset 0 1px 2px rgba(0,0,0,0.025);box-shadow:inset 0 1px 2px rgba(0,0,0,0.025)}.uneditable-input{overflow:hidden;white-space:nowrap}.uneditable-textarea{width:auto;height:auto}input:-moz-placeholder,textarea:-moz-placeholder{color:#999}input:-ms-input-placeholder,textarea:-ms-input-placeholder{color:#999}input::-webkit-input-placeholder,textarea::-webkit-input-placeholder{color:#999}.radio,.checkbox{min-height:18px;padding-left:18px}.radio input[type="radio"],.checkbox 
input[type="checkbox"]{float:left;margin-left:-18px}.controls>.radio:first-child,.controls>.checkbox:first-child{padding-top:5px}.radio.inline,.checkbox.inline{display:inline-block;padding-top:5px;margin-bottom:0;vertical-align:middle}.radio.inline+.radio.inline,.checkbox.inline+.checkbox.inline{margin-left:10px}.input-mini{width:60px}.input-small{width:90px}.input-medium{width:150px}.input-large{width:210px}.input-xlarge{width:270px}.input-xxlarge{width:530px}input[class*="span"],select[class*="span"],textarea[class*="span"],.uneditable-input[class*="span"],.row-fluid input[class*="span"],.row-fluid select[class*="span"],.row-fluid textarea[class*="span"],.row-fluid .uneditable-input[class*="span"]{float:none;margin-left:0}.input-append input[class*="span"],.input-append .uneditable-input[class*="span"],.input-prepend input[class*="span"],.input-prepend .uneditable-input[class*="span"],.row-fluid input[class*="span"],.row-fluid select[class*="span"],.row-fluid textarea[class*="span"],.row-fluid .uneditable-input[class*="span"],.row-fluid .input-prepend [class*="span"],.row-fluid .input-append [class*="span"]{display:inline-block}input,textarea,.uneditable-input{margin-left:0}.controls-row [class*="span"]+[class*="span"]{margin-left:20px}input.span12,textarea.span12,.uneditable-input.span12{width:926px}input.span11,textarea.span11,.uneditable-input.span11{width:846px}input.span10,textarea.span10,.uneditable-input.span10{width:766px}input.span9,textarea.span9,.uneditable-input.span9{width:686px}input.span8,textarea.span8,.uneditable-input.span8{width:606px}input.span7,textarea.span7,.uneditable-input.span7{width:526px}input.span6,textarea.span6,.uneditable-input.span6{width:446px}input.span5,textarea.span5,.uneditable-input.span5{width:366px}input.span4,textarea.span4,.uneditable-input.span4{width:286px}input.span3,textarea.span3,.uneditable-input.span3{width:206px}input.span2,textarea.span2,.uneditable-input.span2{width:126px}input.span1,textarea.span1,.uneditable-input.span1{width:46px}.controls-row{*zoom:1}.controls-row:before,.controls-row:after{display:table;line-height:0;content:""}.controls-row:after{clear:both}.controls-row [class*="span"]{float:left}input[disabled],select[disabled],textarea[disabled],input[readonly],select[readonly],textarea[readonly]{cursor:not-allowed;background-color:#eee}input[type="radio"][disabled],input[type="checkbox"][disabled],input[type="radio"][readonly],input[type="checkbox"][readonly]{background-color:transparent}.control-group.warning>label,.control-group.warning .help-block,.control-group.warning .help-inline{color:#c09853}.control-group.warning .checkbox,.control-group.warning .radio,.control-group.warning input,.control-group.warning select,.control-group.warning textarea{color:#c09853;border-color:#c09853;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075)}.control-group.warning .checkbox:focus,.control-group.warning .radio:focus,.control-group.warning input:focus,.control-group.warning select:focus,.control-group.warning textarea:focus{border-color:#a47e3c;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #dbc59e;-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #dbc59e;box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #dbc59e}.control-group.warning .input-prepend .add-on,.control-group.warning .input-append .add-on{color:#c09853;background-color:#fcf8e3;border-color:#c09853}.control-group.error>label,.control-group.error 
.help-block,.control-group.error .help-inline{color:#b94a48}.control-group.error .checkbox,.control-group.error .radio,.control-group.error input,.control-group.error select,.control-group.error textarea{color:#b94a48;border-color:#b94a48;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075)}.control-group.error .checkbox:focus,.control-group.error .radio:focus,.control-group.error input:focus,.control-group.error select:focus,.control-group.error textarea:focus{border-color:#953b39;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #d59392;-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #d59392;box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #d59392}.control-group.error .input-prepend .add-on,.control-group.error .input-append .add-on{color:#b94a48;background-color:#f2dede;border-color:#b94a48}.control-group.success>label,.control-group.success .help-block,.control-group.success .help-inline{color:#468847}.control-group.success .checkbox,.control-group.success .radio,.control-group.success input,.control-group.success select,.control-group.success textarea{color:#468847;border-color:#468847;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075)}.control-group.success .checkbox:focus,.control-group.success .radio:focus,.control-group.success input:focus,.control-group.success select:focus,.control-group.success textarea:focus{border-color:#356635;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #7aba7b;-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #7aba7b;box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #7aba7b}.control-group.success .input-prepend .add-on,.control-group.success .input-append .add-on{color:#468847;background-color:#dff0d8;border-color:#468847}input:focus:required:invalid,textarea:focus:required:invalid,select:focus:required:invalid{color:#b94a48;border-color:#ee5f5b}input:focus:required:invalid:focus,textarea:focus:required:invalid:focus,select:focus:required:invalid:focus{border-color:#e9322d;-webkit-box-shadow:0 0 6px #f8b9b7;-moz-box-shadow:0 0 6px #f8b9b7;box-shadow:0 0 6px #f8b9b7}.form-actions{padding:19px 20px 20px;margin-top:20px;margin-bottom:20px;background-color:#f5f5f5;border-top:1px solid #e5e5e5;*zoom:1}.form-actions:before,.form-actions:after{display:table;line-height:0;content:""}.form-actions:after{clear:both}.help-block,.help-inline{color:#595959}.help-block{display:block;margin-bottom:10px}.help-inline{display:inline-block;*display:inline;padding-left:5px;vertical-align:middle;*zoom:1}.input-append,.input-prepend{margin-bottom:5px;font-size:0;white-space:nowrap}.input-append input,.input-prepend input,.input-append select,.input-prepend select,.input-append .uneditable-input,.input-prepend .uneditable-input{position:relative;margin-bottom:0;*margin-left:0;font-size:14px;vertical-align:top;-webkit-border-radius:0 3px 3px 0;-moz-border-radius:0 3px 3px 0;border-radius:0 3px 3px 0}.input-append input:focus,.input-prepend input:focus,.input-append select:focus,.input-prepend select:focus,.input-append .uneditable-input:focus,.input-prepend .uneditable-input:focus{z-index:2}.input-append .add-on,.input-prepend .add-on{display:inline-block;width:auto;height:20px;min-width:16px;padding:4px 5px;font-size:14px;font-weight:normal;line-height:20px;text-align:center;text-shadow:0 1px 0 #fff;background-color:#eee;border:1px solid 
#ccc}.input-append .add-on,.input-prepend .add-on,.input-append .btn,.input-prepend .btn{margin-left:-1px;vertical-align:top;-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.input-append .active,.input-prepend .active{background-color:#a9dba9;border-color:#46a546}.input-prepend .add-on,.input-prepend .btn{margin-right:-1px}.input-prepend .add-on:first-child,.input-prepend .btn:first-child{-webkit-border-radius:3px 0 0 3px;-moz-border-radius:3px 0 0 3px;border-radius:3px 0 0 3px}.input-append input,.input-append select,.input-append .uneditable-input{-webkit-border-radius:3px 0 0 3px;-moz-border-radius:3px 0 0 3px;border-radius:3px 0 0 3px}.input-append .add-on:last-child,.input-append .btn:last-child{-webkit-border-radius:0 3px 3px 0;-moz-border-radius:0 3px 3px 0;border-radius:0 3px 3px 0}.input-prepend.input-append input,.input-prepend.input-append select,.input-prepend.input-append .uneditable-input{-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.input-prepend.input-append .add-on:first-child,.input-prepend.input-append .btn:first-child{margin-right:-1px;-webkit-border-radius:3px 0 0 3px;-moz-border-radius:3px 0 0 3px;border-radius:3px 0 0 3px}.input-prepend.input-append .add-on:last-child,.input-prepend.input-append .btn:last-child{margin-left:-1px;-webkit-border-radius:0 3px 3px 0;-moz-border-radius:0 3px 3px 0;border-radius:0 3px 3px 0}input.search-query{padding-right:14px;padding-right:4px \9;padding-left:14px;padding-left:4px \9;margin-bottom:0;-webkit-border-radius:15px;-moz-border-radius:15px;border-radius:15px}.form-search .input-append .search-query,.form-search .input-prepend .search-query{-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.form-search .input-append .search-query{-webkit-border-radius:14px 0 0 14px;-moz-border-radius:14px 0 0 14px;border-radius:14px 0 0 14px}.form-search .input-append .btn{-webkit-border-radius:0 14px 14px 0;-moz-border-radius:0 14px 14px 0;border-radius:0 14px 14px 0}.form-search .input-prepend .search-query{-webkit-border-radius:0 14px 14px 0;-moz-border-radius:0 14px 14px 0;border-radius:0 14px 14px 0}.form-search .input-prepend .btn{-webkit-border-radius:14px 0 0 14px;-moz-border-radius:14px 0 0 14px;border-radius:14px 0 0 14px}.form-search input,.form-inline input,.form-horizontal input,.form-search textarea,.form-inline textarea,.form-horizontal textarea,.form-search select,.form-inline select,.form-horizontal select,.form-search .help-inline,.form-inline .help-inline,.form-horizontal .help-inline,.form-search .uneditable-input,.form-inline .uneditable-input,.form-horizontal .uneditable-input,.form-search .input-prepend,.form-inline .input-prepend,.form-horizontal .input-prepend,.form-search .input-append,.form-inline .input-append,.form-horizontal .input-append{display:inline-block;*display:inline;margin-bottom:0;vertical-align:middle;*zoom:1}.form-search .hide,.form-inline .hide,.form-horizontal .hide{display:none}.form-search label,.form-inline label,.form-search .btn-group,.form-inline .btn-group{display:inline-block}.form-search .input-append,.form-inline .input-append,.form-search .input-prepend,.form-inline .input-prepend{margin-bottom:0}.form-search .radio,.form-search .checkbox,.form-inline .radio,.form-inline .checkbox{padding-left:0;margin-bottom:0;vertical-align:middle}.form-search .radio input[type="radio"],.form-search .checkbox input[type="checkbox"],.form-inline .radio input[type="radio"],.form-inline .checkbox 
input[type="checkbox"]{float:left;margin-right:3px;margin-left:0}.control-group{margin-bottom:10px}legend+.control-group{margin-top:20px;-webkit-margin-top-collapse:separate}.form-horizontal .control-group{margin-bottom:20px;*zoom:1}.form-horizontal .control-group:before,.form-horizontal .control-group:after{display:table;line-height:0;content:""}.form-horizontal .control-group:after{clear:both}.form-horizontal .control-label{float:left;width:140px;padding-top:5px;text-align:right}.form-horizontal .controls{*display:inline-block;*padding-left:20px;margin-left:160px;*margin-left:0}.form-horizontal .controls:first-child{*padding-left:160px}.form-horizontal .help-block{margin-top:10px;margin-bottom:0}.form-horizontal .form-actions{padding-left:160px}table{max-width:100%;background-color:transparent;border-collapse:collapse;border-spacing:0}.table{width:100%;margin-bottom:20px}.table th,.table td{padding:8px;line-height:20px;text-align:left;vertical-align:top;border-top:1px solid #ddd}.table th{font-weight:bold}.table thead th{vertical-align:bottom}.table caption+thead tr:first-child th,.table caption+thead tr:first-child td,.table colgroup+thead tr:first-child th,.table colgroup+thead tr:first-child td,.table thead:first-child tr:first-child th,.table thead:first-child tr:first-child td{border-top:0}.table tbody+tbody{border-top:2px solid #ddd}.table-condensed th,.table-condensed td{padding:4px 5px}.table-bordered{border:1px solid #ddd;border-collapse:separate;*border-collapse:collapse;border-left:0;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}.table-bordered th,.table-bordered td{border-left:1px solid #ddd}.table-bordered caption+thead tr:first-child th,.table-bordered caption+tbody tr:first-child th,.table-bordered caption+tbody tr:first-child td,.table-bordered colgroup+thead tr:first-child th,.table-bordered colgroup+tbody tr:first-child th,.table-bordered colgroup+tbody tr:first-child td,.table-bordered thead:first-child tr:first-child th,.table-bordered tbody:first-child tr:first-child th,.table-bordered tbody:first-child tr:first-child td{border-top:0}.table-bordered thead:first-child tr:first-child th:first-child,.table-bordered tbody:first-child tr:first-child td:first-child{-webkit-border-top-left-radius:4px;border-top-left-radius:4px;-moz-border-radius-topleft:4px}.table-bordered thead:first-child tr:first-child th:last-child,.table-bordered tbody:first-child tr:first-child td:last-child{-webkit-border-top-right-radius:4px;border-top-right-radius:4px;-moz-border-radius-topright:4px}.table-bordered thead:last-child tr:last-child th:first-child,.table-bordered tbody:last-child tr:last-child td:first-child,.table-bordered tfoot:last-child tr:last-child td:first-child{-webkit-border-radius:0 0 0 4px;-moz-border-radius:0 0 0 4px;border-radius:0 0 0 4px;-webkit-border-bottom-left-radius:4px;border-bottom-left-radius:4px;-moz-border-radius-bottomleft:4px}.table-bordered thead:last-child tr:last-child th:last-child,.table-bordered tbody:last-child tr:last-child td:last-child,.table-bordered tfoot:last-child tr:last-child td:last-child{-webkit-border-bottom-right-radius:4px;border-bottom-right-radius:4px;-moz-border-radius-bottomright:4px}.table-bordered caption+thead tr:first-child th:first-child,.table-bordered caption+tbody tr:first-child td:first-child,.table-bordered colgroup+thead tr:first-child th:first-child,.table-bordered colgroup+tbody tr:first-child 
td:first-child{-webkit-border-top-left-radius:4px;border-top-left-radius:4px;-moz-border-radius-topleft:4px}.table-bordered caption+thead tr:first-child th:last-child,.table-bordered caption+tbody tr:first-child td:last-child,.table-bordered colgroup+thead tr:first-child th:last-child,.table-bordered colgroup+tbody tr:first-child td:last-child{-webkit-border-top-right-radius:4px;border-top-right-radius:4px;-moz-border-right-topleft:4px}.table-striped tbody tr:nth-child(odd) td,.table-striped tbody tr:nth-child(odd) th{background-color:#f9f9f9}.table-hover tbody tr:hover td,.table-hover tbody tr:hover th{background-color:#f5f5f5}table [class*=span],.row-fluid table [class*=span]{display:table-cell;float:none;margin-left:0}table .span1{float:none;width:44px;margin-left:0}table .span2{float:none;width:124px;margin-left:0}table .span3{float:none;width:204px;margin-left:0}table .span4{float:none;width:284px;margin-left:0}table .span5{float:none;width:364px;margin-left:0}table .span6{float:none;width:444px;margin-left:0}table .span7{float:none;width:524px;margin-left:0}table .span8{float:none;width:604px;margin-left:0}table .span9{float:none;width:684px;margin-left:0}table .span10{float:none;width:764px;margin-left:0}table .span11{float:none;width:844px;margin-left:0}table .span12{float:none;width:924px;margin-left:0}table .span13{float:none;width:1004px;margin-left:0}table .span14{float:none;width:1084px;margin-left:0}table .span15{float:none;width:1164px;margin-left:0}table .span16{float:none;width:1244px;margin-left:0}table .span17{float:none;width:1324px;margin-left:0}table .span18{float:none;width:1404px;margin-left:0}table .span19{float:none;width:1484px;margin-left:0}table .span20{float:none;width:1564px;margin-left:0}table .span21{float:none;width:1644px;margin-left:0}table .span22{float:none;width:1724px;margin-left:0}table .span23{float:none;width:1804px;margin-left:0}table .span24{float:none;width:1884px;margin-left:0}.table tbody tr.success td{background-color:#dff0d8}.table tbody tr.error td{background-color:#f2dede}.table tbody tr.info td{background-color:#d9edf7}[class^="icon-"],[class*=" icon-"]{display:inline-block;width:32px;height:32px;margin-top:1px;margin-right:.3em;line-height:32px;vertical-align:text-top;background-image:url("../img/glyphicons.png");background-position:32px 32px;background-repeat:no-repeat}.icon-white,.nav>.active>a>[class^="icon-"],.nav>.active>a>[class*=" icon-"],.dropdown-menu>li>a:hover>[class^="icon-"],.dropdown-menu>li>a:hover>[class*=" icon-"],.dropdown-menu>.active>a>[class^="icon-"],.dropdown-menu>.active>a>[class*=" icon-"]{background-image:url("../img/glyphicons-white.png")}.icon-glass{background-position:0 0}.icon-leaf{background-position:-56px 0}.icon-dog{background-position:-112px 0}.icon-user{background-position:-170px 0}.icon-girl{background-position:-225px 0}.icon-car{background-position:-279px 0}.icon-user-add{background-position:-337px 0}.icon-user-remove{background-position:-397px 0}.icon-film{background-position:-455px 0}.icon-magic{background-position:-509px 0}.icon-envelope{background-position:-565px 0}.icon-camera{background-position:-621px 0}.icon-heart{background-position:-679px 0}.icon-beach-umbrella{background-position:-735px 0}.icon-train{background-position:-792px 0}.icon-print{background-position:-844px 0}.icon-bin{background-position:-900px 0}.icon-music{background-position:-952px 0}.icon-note{background-position:-1005px 0}.icon-cogwheel{background-position:-1055px 0}.icon-home{background-position:-1111px 
0}.icon-snowflake{background-position:-1170px 0}.icon-fire{background-position:-1230px 0}.icon-cogwheels{background-position:-1282px 0}.icon-parents{background-position:-1340px 0}.icon-binoculars{background-position:-1404px 0}.icon-road{background-position:-1460px 0}.icon-search{background-position:-1520px 0}.icon-cars{background-position:-1576px 0}.icon-notes-2{background-position:-1644px 0}.icon-pencil{background-position:-1696px 0}.icon-bus{background-position:-1753px 0}.icon-wifi-alt{background-position:-1817px 0}.icon-luggage{background-position:-1875px 0}.icon-old-man{background-position:-1927px 0}.icon-woman{background-position:0 -60px}.icon-file{background-position:-54px -60px}.icon-credit{background-position:-105px -60px}.icon-airplane{background-position:-163px -60px}.icon-notes{background-position:-219px -60px}.icon-stats{background-position:-271px -60px}.icon-charts{background-position:-329px -60px}.icon-pie-chart{background-position:-388px -60px}.icon-group{background-position:-446px -60px}.icon-keys{background-position:-513px -60px}.icon-calendar{background-position:-569px -60px}.icon-router{background-position:-624px -60px}.icon-camera-small{background-position:-683px -60px}.icon-dislikes{background-position:-737px -60px}.icon-star{background-position:-795px -60px}.icon-link{background-position:-852px -60px}.icon-eye-open{background-position:-905px -60px}.icon-eye-close{background-position:-968px -60px}.icon-alarm{background-position:-1031px -60px}.icon-clock{background-position:-1091px -60px}.icon-stopwatch{background-position:-1147px -60px}.icon-projector{background-position:-1202px -60px}.icon-history{background-position:-1262px -60px}.icon-truck{background-position:-1319px -60px}.icon-cargo{background-position:-1383px -60px}.icon-compass{background-position:-1440px -60px}.icon-keynote{background-position:-1496px -60px}.icon-attach{background-position:-1548px -60px}.icon-power{background-position:-1606px -60px}.icon-lightbulb{background-position:-1660px -60px}.icon-tag{background-position:-1712px -60px}.icon-tags{background-position:-1768px -60px}.icon-cleaning{background-position:-1830px -60px}.icon-ruler{background-position:-1886px -60px}.icon-gift{background-position:-1945px -60px}.icon-umbrella{background-position:0 -122px}.icon-book{background-position:-58px -122px}.icon-bookmark{background-position:-112px -122px}.icon-signal{background-position:-160px -122px}.icon-cup{background-position:-223px -122px}.icon-stroller{background-position:-277px -122px}.icon-headphones{background-position:-334px -122px}.icon-headset{background-position:-390px -122px}.icon-warning-sign{background-position:-446px -122px}.icon-signal{background-position:-507px -122px}.icon-retweet{background-position:-563px -122px}.icon-refresh{background-position:-625px -122px}.icon-roundabout{background-position:-682px -122px}.icon-random{background-position:-741px -122px}.icon-heat{background-position:-801px -122px}.icon-repeat{background-position:-862px -122px}.icon-display{background-position:-918px -122px}.icon-log-book{background-position:-978px -122px}.icon-adress-book{background-position:-1032px -122px}.icon-magnet{background-position:-1086px -122px}.icon-table{background-position:-1139px -122px}.icon-adjust{background-position:-1195px -122px}.icon-tint{background-position:-1253px -122px}.icon-crop{background-position:-1308px -122px}.icon-vector-path-square{background-position:-1366px -122px}.icon-vector-path-circle{background-position:-1422px 
-122px}.icon-vector-path-polygon{background-position:-1478px -122px}.icon-vector-path-line{background-position:-1536px -122px}.icon-vector-path-curve{background-position:-1592px -122px}.icon-vector-path-all{background-position:-1648px -122px}.icon-font{background-position:-1704px -122px}.icon-italic{background-position:-1763px -122px}.icon-bold{background-position:-1809px -122px}.icon-text-underline{background-position:-1860px -122px}.icon-text-strike{background-position:-1912px -122px}.icon-text-height{background-position:-1964px -122px}.icon-text-width{background-position:0 -184px}.icon-text-resize{background-position:-54px -184px}.icon-left-indent{background-position:-112px -184px}.icon-right-indent{background-position:-168px -184px}.icon-align-left{background-position:-224px -184px}.icon-align-center{background-position:-280px -184px}.icon-align-right{background-position:-336px -184px}.icon-justify{background-position:-392px -184px}.icon-list{background-position:-448px -184px}.icon-text-smaller{background-position:-504px -184px}.icon-text-bigger{background-position:-558px -184px}.icon-embed{background-position:-614px -184px}.icon-embed-close{background-position:-676px -184px}.icon-adjust{background-position:-738px -184px}.icon-message-full{background-position:-793px -184px}.icon-message-empty{background-position:-849px -184px}.icon-message-in{background-position:-905px -184px}.icon-message-out{background-position:-961px -184px}.icon-message-plus{background-position:-1017px -184px}.icon-message-minus{background-position:-1078px -184px}.icon-message-ban{background-position:-1139px -184px}.icon-message-flag{background-position:-1200px -184px}.icon-message-lock{background-position:-1259px -184px}.icon-message-new{background-position:-1319px -184px}.icon-inbox{background-position:-1379px -184px}.icon-inbox-plus{background-position:-1435px -184px}.icon-inbox-minus{background-position:-1494px -184px}.icon-inbox-lock{background-position:-1553px -184px}.icon-inbox-in{background-position:-1611px -184px}.icon-inbox-out{background-position:-1667px -184px}.icon-computer-locked{background-position:-1723px -184px}.icon-computer-service{background-position:-1783px -184px}.icon-computer-process{background-position:-1843px -184px}.icon-phone{background-position:-1903px -184px}.icon-database-lock{background-position:-1950px -184px}.icon-database-plus{background-position:0 -246px}.icon-database-minus{background-position:-59px -246px}.icon-database-ban{background-position:-118px -246px}.icon-folder-open{background-position:-176px -246px}.icon-folder-plus{background-position:-238px -246px}.icon-folder-minus{background-position:-299px -246px}.icon-folder-lock{background-position:-360px -246px}.icon-folder-flag{background-position:-420px -246px}.icon-folder-new{background-position:-479px -246px}.icon-check{background-position:-539px -246px}.icon-edit{background-position:-593px -246px}.icon-new-window{background-position:-649px -246px}.icon-more-windows{background-position:-707px -246px}.icon-show-big-thumbnails{background-position:-762px -246px}.icon-show-thumbnails{background-position:-816px -246px}.icon-show-thumbnails-with-lines{background-position:-870px -246px}.icon-show-lines{background-position:-926px -246px}.icon-playlist{background-position:-982px -246px}.icon-picture{background-position:-1043px -246px}.icon-imac{background-position:-1099px -246px}.icon-macbook{background-position:-1157px -246px}.icon-ipad{background-position:-1217px -246px}.icon-iphone{background-position:-1269px 
-246px}.icon-iphone-transfer{background-position:-1315px -246px}.icon-iphone-exchange{background-position:-1376px -246px}.icon-ipod{background-position:-1437px -246px}.icon-ipod-shuffle{background-position:-1483px -246px}.icon-ear-plugs{background-position:-1530px -246px}.icon-albums{background-position:-1582px -246px}.icon-step-backward{background-position:-1642px -246px}.icon-fast-backward{background-position:-1688px -246px}.icon-rewind{background-position:-1745px -246px}.icon-play{background-position:-1800px -246px}.icon-pause{background-position:-1848px -246px}.icon-stop{background-position:-1890px -246px}.icon-forward{background-position:-1936px -246px}.icon-fast-forward{background-position:0 -308px}.icon-step-forward{background-position:-57px -308px}.icon-eject{background-position:-103px -308px}.icon-facetime-video{background-position:-153px -308px}.icon-download-alt{background-position:-209px -308px}.icon-mute{background-position:-265px -308px}.icon-volume-down{background-position:-319px -308px}.icon-volume-up{background-position:-367px -308px}.icon-screenshot{background-position:-423px -308px}.icon-move{background-position:-482px -308px}.icon-more{background-position:-538px -308px}.icon-brightness-reduce{background-position:-592px -308px}.icon-brightness-increase{background-position:-644px -308px}.icon-circle-plus{background-position:-700px -308px}.icon-circle-minus{background-position:-758px -308px}.icon-circle-remove{background-position:-816px -308px}.icon-circle-ok{background-position:-874px -308px}.icon-circle-question-mark{background-position:-932px -308px}.icon-circle-info{background-position:-990px -308px}.icon-circle-exclamation-mark{background-position:-1048px -308px}.icon-remove{background-position:-1106px -308px}.icon-ok{background-position:-1164px -308px}.icon-ban{background-position:-1222px -308px}.icon-download{background-position:-1280px -308px}.icon-upload{background-position:-1338px -308px}.icon-shopping-cart{background-position:-1396px -308px}.icon-lock{background-position:-1454px -308px}.icon-unlock{background-position:-1507px -308px}.icon-electricity{background-position:-1560px -308px}.icon-ok-2{background-position:-1603px -308px}.icon-remove-2{background-position:-1660px -308px}.icon-cart-out{background-position:-1710px -308px}.icon-cart-in{background-position:-1768px -308px}.icon-left-arrow{background-position:-1826px -308px}.icon-right-arrow{background-position:-1878px -308px}.icon-down-arrow{background-position:-1930px -308px}.icon-up-arrow{background-position:0 -370px}.icon-resize-small{background-position:-50px -370px}.icon-resize-full{background-position:-106px -370px}.icon-circle-arrow-left{background-position:-162px -370px}.icon-circle-arrow-right{background-position:-220px -370px}.icon-circle-arrow-up{background-position:-278px -370px}.icon-circle-arrow-down{background-position:-336px -370px}.icon-play-button{background-position:-394px -370px}.icon-unshare{background-position:-452px -370px}.icon-share{background-position:-508px -370px}.icon-thin-arrow-right{background-position:-564px -370px}.icon-thin-arrow-left{background-position:-611px -370px}.icon-bluetooth{background-position:-658px -370px}.icon-euro{background-position:-704px -370px}.icon-usd{background-position:-758px -370px}.icon-bp{background-position:-807px -370px}.icon-retweet-2{background-position:-856px -370px}.icon-moon{background-position:-921px -370px}.icon-sun{background-position:-975px -370px}.icon-cloud{background-position:-1031px -370px}.icon-direction{background-position:-1090px 
-370px}.icon-brush{background-position:-1148px -370px}.icon-pen{background-position:-1205px -370px}.icon-zoom-in{background-position:-1261px -370px}.icon-zoom-out{background-position:-1318px -370px}.icon-pin{background-position:-1375px -370px}.icon-riflescope{background-position:-1417px -370px}.icon-rotation-lock{background-position:-1474px -370px}.icon-flash{background-position:-1533px -370px}.icon-google-maps{background-position:-1579px -370px}.icon-anchor{background-position:-1626px -370px}.icon-conversation{background-position:-1682px -370px}.icon-chat{background-position:-1739px -370px}.icon-male{background-position:-1795px -370px}.icon-female{background-position:-1849px -370px}.icon-asterisk{background-position:-1897px -370px}.icon-divide{background-position:-1949px -370px}.icon-snorkel-diving{background-position:0 -432px}.icon-scuba-diving{background-position:-59px -432px}.icon-oxygen-bottle{background-position:-118px -432px}.icon-fins{background-position:-172px -432px}.icon-fishes{background-position:-235px -432px}.icon-boat{background-position:-295px -432px}.icon-delete-point{background-position:-351px -432px}.icon-sheriffs-star{background-position:-409px -432px}.icon-qrcode{background-position:-465px -432px}.icon-barcode{background-position:-521px -432px}.icon-pool{background-position:-577px -432px}.icon-buoy{background-position:-633px -432px}.icon-spade{background-position:-689px -432px}.icon-bank{background-position:-745px -432px}.icon-vcard{background-position:-801px -432px}.icon-electrical-plug{background-position:-855px -432px}.icon-flag{background-position:-905px -432px}.icon-credit-card{background-position:-958px -432px}.icon-keyboard-wireless{background-position:-1016px -432px}.icon-keyboard-wired{background-position:-1075px -432px}.icon-shield{background-position:-1134px -432px}.icon-ring{background-position:-1188px -432px}.icon-cake{background-position:-1241px -432px}.icon-drink{background-position:-1295px -432px}.icon-beer{background-position:-1350px -432px}.icon-fast-food{background-position:-1405px -432px}.icon-cutlery{background-position:-1465px -432px}.icon-pizza{background-position:-1510px -432px}.icon-birthday-cake{background-position:-1568px -432px}.icon-tablet{background-position:-1626px -432px}.icon-settings{background-position:-1683px -432px}.icon-bullets{background-position:-1739px -432px}.icon-cardio{background-position:-1798px -432px}.icon-t-shirt{background-position:-1855px -432px}.icon-pants{background-position:-1915px -432px}.icon-sweater{background-position:-1966px -432px}.icon-fabric{background-position:0 -494px}.icon-leather{background-position:-59px -494px}.icon-scissors{background-position:-114px -494px}.icon-podium{background-position:-170px -494px}.icon-skull{background-position:-230px -494px}.icon-celebration{background-position:-284px -494px}.icon-tea-kettle{background-position:-340px -494px}.icon-french-press{background-position:-398px -494px}.icon-coffe-cup{background-position:-453px -494px}.icon-pot{background-position:-510px -494px}.icon-grater{background-position:-569px -494px}.icon-kettle{background-position:-619px -494px}.icon-hospital{background-position:-674px -494px}.icon-hospital-h{background-position:-730px -494px}.icon-microphone{background-position:-786px -494px}.icon-webcam{background-position:-835px -494px}.icon-temple-christianity-church{background-position:-886px -494px}.icon-temple-islam{background-position:-942px -494px}.icon-temple-hindu{background-position:-999px -494px}.icon-temple-buddhist{background-position:-1055px 
-494px}.icon-electrical-socket-eu{background-position:-1115px -494px}.icon-electrical-socket-us{background-position:-1170px -494px}.icon-bomb{background-position:-1225px -494px}.icon-comments{background-position:-1284px -494px}.icon-flower{background-position:-1340px -494px}.icon-baseball{background-position:-1391px -494px}.icon-rugby{background-position:-1448px -494px}.icon-ax{background-position:-1503px -494px}.icon-table-tennis{background-position:-1562px -494px}.icon-bowling{background-position:-1618px -494px}.icon-tree-conifer{background-position:-1674px -494px}.icon-tree-deciduous{background-position:-1727px -494px}.icon-more-items{background-position:-1779px -494px}.icon-sort{background-position:-1832px -494px}.icon-filter{background-position:-1889px -494px}.icon-gamepad{background-position:-1941px -494px}.icon-playing-dices{background-position:0 -556px}.icon-calculator{background-position:-59px -556px}.icon-tie{background-position:-112px -556px}.icon-wallet{background-position:-155px -556px}.icon-share{background-position:-212px -556px}.icon-sampler{background-position:-266px -556px}.icon-piano{background-position:-325px -556px}.icon-web-browser{background-position:-380px -556px}.icon-blog{background-position:-436px -556px}.icon-dashboard{background-position:-489px -556px}.icon-certificate{background-position:-545px -556px}.icon-bell{background-position:-594px -556px}.icon-candle{background-position:-650px -556px}.icon-pin-classic{background-position:-702px -556px}.icon-iphone-shake{background-position:-758px -556px}.icon-pin-flag{background-position:-814px -556px}.icon-turtle{background-position:-876px -556px}.icon-rabbit{background-position:-936px -556px}.icon-globe{background-position:-994px -556px}.icon-briefcase{background-position:-1050px -556px}.icon-hdd{background-position:-1106px -556px}.icon-thumbs-up{background-position:-1162px -556px}.icon-thumbs-down{background-position:-1218px -556px}.icon-hand-right{background-position:-1274px -556px}.icon-hand-left{background-position:-1332px -556px}.icon-hand-up{background-position:-1390px -556px}.icon-hand-down{background-position:-1441px -556px}.icon-fullscreen{background-position:-1492px -556px}.icon-shopping-bag{background-position:-1548px -556px}.icon-book-open{background-position:-1603px -556px}.icon-nameplate{background-position:-1660px -556px}.icon-nameplate-alt{background-position:-1716px -556px}.icon-vases{background-position:-1772px -556px}.icon-announcement{background-position:-1828px -556px}.icon-dumbbell{background-position:-1885px -556px}.icon-suitcase{background-position:-1943px -556px}.icon-file-import{background-position:0 -618px}.icon-file-export{background-position:-54px -618px}.icon-pinterest{background-position:-109px -618px}.icon-dropbox{background-position:-165px -618px}.icon-google-alt{background-position:-221px -618px}.icon-jolicloud{background-position:-277px -618px}.icon-yahoo{background-position:-333px -618px}.icon-blogger{background-position:-389px -618px}.icon-picasa{background-position:-445px -618px}.icon-amazon{background-position:-501px -618px}.icon-tumblr{background-position:-557px -618px}.icon-wordpress{background-position:-613px -618px}.icon-instapaper{background-position:-669px -618px}.icon-evernote{background-position:-725px -618px}.icon-xing{background-position:-781px -618px}.icon-zootool{background-position:-837px -618px}.icon-dribbble{background-position:-893px -618px}.icon-deviantart{background-position:-949px -618px}.icon-read-it-later{background-position:-1005px 
-618px}.icon-linked-in{background-position:-1061px -618px}.icon-forrst{background-position:-1117px -618px}.icon-pinboard{background-position:-1173px -618px}.icon-behance{background-position:-1229px -618px}.icon-github{background-position:-1285px -618px}.icon-youtube{background-position:-1341px -618px}.icon-skitch{background-position:-1397px -618px}.icon-4square{background-position:-1453px -618px}.icon-quora{background-position:-1509px -618px}.icon-google-plus{background-position:-1565px -618px}.icon-spotify{background-position:-1621px -618px}.icon-stumbleupon{background-position:-1677px -618px}.icon-readability{background-position:-1733px -618px}.icon-facebook{background-position:-1789px -618px}.icon-twitter-t{background-position:-1845px -618px}.icon-twitter{background-position:-1901px -618px}.icon-buzz{background-position:-1957px -618px}.icon-vimeo{background-position:0 -680px}.icon-flickr{background-position:-56px -680px}.icon-last-fm{background-position:-112px -680px}.icon-rss{background-position:-168px -680px}.icon-skype{background-position:-224px -680px}.icon-e-mail{background-position:-280px -680px}.dropup,.dropdown{position:relative}.dropdown-toggle{*margin-bottom:-3px}.dropdown-toggle:active,.open .dropdown-toggle{outline:0}.caret{display:inline-block;width:0;height:0;vertical-align:top;border-top:4px solid #000;border-right:4px solid transparent;border-left:4px solid transparent;content:""}.dropdown .caret{margin-top:8px;margin-left:2px}.dropdown-menu{position:absolute;top:100%;left:0;z-index:1000;display:none;float:left;min-width:160px;padding:5px 0;margin:2px 0 0;list-style:none;background-color:#fff;border:1px solid #ccc;border:1px solid rgba(0,0,0,0.2);*border-right-width:2px;*border-bottom-width:2px;-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px;-webkit-box-shadow:0 5px 10px rgba(0,0,0,0.2);-moz-box-shadow:0 5px 10px rgba(0,0,0,0.2);box-shadow:0 5px 10px rgba(0,0,0,0.2);-webkit-background-clip:padding-box;-moz-background-clip:padding;background-clip:padding-box}.dropdown-menu.pull-right{right:0;left:auto}.dropdown-menu .divider{*width:100%;height:1px;margin:9px 1px;*margin:-5px 0 5px;overflow:hidden;background-color:#e5e5e5;border-bottom:1px solid #fff}.dropdown-menu a{display:block;padding:3px 20px;clear:both;font-weight:normal;line-height:20px;color:#333;white-space:nowrap}.dropdown-menu li>a:hover,.dropdown-menu li>a:focus,.dropdown-submenu:hover>a{color:#fff;text-decoration:none;background-color:#08c;background-color:#0081c2;background-image:-moz-linear-gradient(top,#08c,#0077b3);background-image:-webkit-gradient(linear,0 0,0 100%,from(#08c),to(#0077b3));background-image:-webkit-linear-gradient(top,#08c,#0077b3);background-image:-o-linear-gradient(top,#08c,#0077b3);background-image:linear-gradient(to bottom,#08c,#0077b3);background-repeat:repeat-x;filter:progid:dximagetransform.microsoft.gradient(startColorstr='#ff0088cc',endColorstr='#ff0077b3',GradientType=0)}.dropdown-menu .active>a,.dropdown-menu .active>a:hover{color:#fff;text-decoration:none;background-color:#08c;background-color:#0081c2;background-image:linear-gradient(to bottom,#08c,#0077b3);background-image:-moz-linear-gradient(top,#08c,#0077b3);background-image:-webkit-gradient(linear,0 0,0 
100%,from(#08c),to(#0077b3));background-image:-webkit-linear-gradient(top,#08c,#0077b3);background-image:-o-linear-gradient(top,#08c,#0077b3);background-repeat:repeat-x;outline:0;filter:progid:dximagetransform.microsoft.gradient(startColorstr='#ff0088cc',endColorstr='#ff0077b3',GradientType=0)}.dropdown-menu .disabled>a,.dropdown-menu .disabled>a:hover{color:#999}.dropdown-menu .disabled>a:hover{text-decoration:none;cursor:default;background-color:transparent}.open{*z-index:1000}.open>.dropdown-menu{display:block}.pull-right>.dropdown-menu{right:0;left:auto}.dropup .caret,.navbar-fixed-bottom .dropdown .caret{border-top:0;border-bottom:4px solid #000;content:"\2191"}.dropup .dropdown-menu,.navbar-fixed-bottom .dropdown .dropdown-menu{top:auto;bottom:100%;margin-bottom:1px}.dropdown-submenu{position:relative}.dropdown-submenu>.dropdown-menu{top:0;left:100%;margin-top:-6px;margin-left:-1px;-webkit-border-radius:0 6px 6px 6px;-moz-border-radius:0 6px 6px 6px;border-radius:0 6px 6px 6px}.dropdown-submenu:hover .dropdown-menu{display:block}.dropdown-submenu>a:after{display:block;float:right;width:0;height:0;margin-top:5px;margin-right:-10px;border-color:transparent;border-left-color:#ccc;border-style:solid;border-width:5px 0 5px 5px;content:" "}.dropdown-submenu:hover>a:after{border-left-color:#fff}.dropdown .dropdown-menu .nav-header{padding-right:20px;padding-left:20px}.typeahead{margin-top:2px;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}.well{min-height:20px;padding:19px;margin-bottom:20px;background-color:#f5f5f5;border:1px solid #e3e3e3;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.05);-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.05);box-shadow:inset 0 1px 1px rgba(0,0,0,0.05)}.well blockquote{border-color:#ddd;border-color:rgba(0,0,0,0.15)}.well-large{padding:24px;-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px}.well-small{padding:9px;-webkit-border-radius:3px;-moz-border-radius:3px;border-radius:3px}.fade{opacity:0;-webkit-transition:opacity .15s linear;-moz-transition:opacity .15s linear;-o-transition:opacity .15s linear;transition:opacity .15s linear}.fade.in{opacity:1}.collapse{position:relative;height:0;overflow:hidden;overflow:visible \9;-webkit-transition:height .35s ease;-moz-transition:height .35s ease;-o-transition:height .35s ease;transition:height .35s ease}.collapse.in{height:auto}.close{float:right;font-size:20px;font-weight:bold;line-height:20px;color:#000;text-shadow:0 1px 0 #fff;opacity:.2;filter:alpha(opacity=20)}.close:hover{color:#000;text-decoration:none;cursor:pointer;opacity:.4;filter:alpha(opacity=40)}button.close{padding:0;cursor:pointer;background:transparent;border:0;-webkit-appearance:none}.btn{display:inline-block;*display:inline;padding:4px 14px;margin-bottom:0;*margin-left:.3em;font-size:14px;line-height:20px;*line-height:20px;color:#333;text-align:center;text-shadow:0 1px 1px rgba(255,255,255,0.75);vertical-align:middle;cursor:pointer;background-color:#f5f5f5;*background-color:#e6e6e6;background-image:-webkit-gradient(linear,0 0,0 100%,from(#fff),to(#e6e6e6));background-image:-webkit-linear-gradient(top,#fff,#e6e6e6);background-image:-o-linear-gradient(top,#fff,#e6e6e6);background-image:linear-gradient(to bottom,#fff,#e6e6e6);background-image:-moz-linear-gradient(top,#fff,#e6e6e6);background-repeat:repeat-x;border:1px solid #bbb;*border:0;border-color:rgba(0,0,0,0.1) rgba(0,0,0,0.1) rgba(0,0,0,0.25);border-color:#e6e6e6 #e6e6e6 
#bfbfbf;border-bottom-color:#a2a2a2;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px;filter:progid:dximagetransform.microsoft.gradient(startColorstr='#ffffffff',endColorstr='#ffe6e6e6',GradientType=0);filter:progid:dximagetransform.microsoft.gradient(enabled=false);*zoom:1;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,0.2),0 1px 2px rgba(0,0,0,0.05);-moz-box-shadow:inset 0 1px 0 rgba(255,255,255,0.2),0 1px 2px rgba(0,0,0,0.05);box-shadow:inset 0 1px 0 rgba(255,255,255,0.2),0 1px 2px rgba(0,0,0,0.05)}.btn:hover,.btn:active,.btn.active,.btn.disabled,.btn[disabled]{color:#333;background-color:#e6e6e6;*background-color:#d9d9d9}.btn:active,.btn.active{background-color:#ccc \9}.btn:first-child{*margin-left:0}.btn:hover{color:#333;text-decoration:none;background-color:#e6e6e6;*background-color:#d9d9d9;background-position:0 -15px;-webkit-transition:background-position .1s linear;-moz-transition:background-position .1s linear;-o-transition:background-position .1s linear;transition:background-position .1s linear}.btn:focus{outline:thin dotted #333;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}.btn.active,.btn:active{background-color:#e6e6e6;background-color:#d9d9d9 \9;background-image:none;outline:0;-webkit-box-shadow:inset 0 2px 4px rgba(0,0,0,0.15),0 1px 2px rgba(0,0,0,0.05);-moz-box-shadow:inset 0 2px 4px rgba(0,0,0,0.15),0 1px 2px rgba(0,0,0,0.05);box-shadow:inset 0 2px 4px rgba(0,0,0,0.15),0 1px 2px rgba(0,0,0,0.05)}.btn.disabled,.btn[disabled]{cursor:default;background-color:#e6e6e6;background-image:none;opacity:.65;filter:alpha(opacity=65);-webkit-box-shadow:none;-moz-box-shadow:none;box-shadow:none}.btn-large{padding:9px 14px;font-size:16px;line-height:normal;-webkit-border-radius:5px;-moz-border-radius:5px;border-radius:5px}.btn-large [class^="icon-"]{margin-top:2px}.btn-small{padding:3px 9px;font-size:12px;line-height:18px}.btn-small [class^="icon-"]{margin-top:0}.btn-mini{padding:2px 6px;font-size:11px;line-height:16px}.btn-block{display:block;width:100%;padding-right:0;padding-left:0;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.btn-block+.btn-block{margin-top:5px}.btn-primary.active,.btn-warning.active,.btn-danger.active,.btn-success.active,.btn-info.active,.btn-inverse.active{color:rgba(255,255,255,0.75)}.btn{border-color:#c5c5c5;border-color:rgba(0,0,0,0.15) rgba(0,0,0,0.15) rgba(0,0,0,0.25)}.btn-primary{color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.25);background-color:#006dcc;*background-color:#04c;background-image:-webkit-gradient(linear,0 0,0 100%,from(#08c),to(#04c));background-image:-webkit-linear-gradient(top,#08c,#04c);background-image:-o-linear-gradient(top,#08c,#04c);background-image:linear-gradient(to bottom,#08c,#04c);background-image:-moz-linear-gradient(top,#08c,#04c);background-repeat:repeat-x;border-color:#04c #04c #002a80;border-color:rgba(0,0,0,0.1) rgba(0,0,0,0.1) rgba(0,0,0,0.25);filter:progid:dximagetransform.microsoft.gradient(startColorstr='#ff0088cc',endColorstr='#ff0044cc',GradientType=0);filter:progid:dximagetransform.microsoft.gradient(enabled=false)}.btn-primary:hover,.btn-primary:active,.btn-primary.active,.btn-primary.disabled,.btn-primary[disabled]{color:#fff;background-color:#04c;*background-color:#003bb3}.btn-primary:active,.btn-primary.active{background-color:#039 \9}.btn-warning{color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.25);background-color:#faa732;*background-color:#f89406;background-image:-webkit-gradient(linear,0 0,0 
100%,from(#fbb450),to(#f89406));background-image:-webkit-linear-gradient(top,#fbb450,#f89406);background-image:-o-linear-gradient(top,#fbb450,#f89406);background-image:linear-gradient(to bottom,#fbb450,#f89406);background-image:-moz-linear-gradient(top,#fbb450,#f89406);background-repeat:repeat-x;border-color:#f89406 #f89406 #ad6704;border-color:rgba(0,0,0,0.1) rgba(0,0,0,0.1) rgba(0,0,0,0.25);filter:progid:dximagetransform.microsoft.gradient(startColorstr='#fffbb450',endColorstr='#fff89406',GradientType=0);filter:progid:dximagetransform.microsoft.gradient(enabled=false)}.btn-warning:hover,.btn-warning:active,.btn-warning.active,.btn-warning.disabled,.btn-warning[disabled]{color:#fff;background-color:#f89406;*background-color:#df8505}.btn-warning:active,.btn-warning.active{background-color:#c67605 \9}.btn-danger{color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.25);background-color:#da4f49;*background-color:#bd362f;background-image:-webkit-gradient(linear,0 0,0 100%,from(#ee5f5b),to(#bd362f));background-image:-webkit-linear-gradient(top,#ee5f5b,#bd362f);background-image:-o-linear-gradient(top,#ee5f5b,#bd362f);background-image:linear-gradient(to bottom,#ee5f5b,#bd362f);background-image:-moz-linear-gradient(top,#ee5f5b,#bd362f);background-repeat:repeat-x;border-color:#bd362f #bd362f #802420;border-color:rgba(0,0,0,0.1) rgba(0,0,0,0.1) rgba(0,0,0,0.25);filter:progid:dximagetransform.microsoft.gradient(startColorstr='#ffee5f5b',endColorstr='#ffbd362f',GradientType=0);filter:progid:dximagetransform.microsoft.gradient(enabled=false)}.btn-danger:hover,.btn-danger:active,.btn-danger.active,.btn-danger.disabled,.btn-danger[disabled]{color:#fff;background-color:#bd362f;*background-color:#a9302a}.btn-danger:active,.btn-danger.active{background-color:#942a25 \9}.btn-success{color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.25);background-color:#5bb75b;*background-color:#51a351;background-image:-webkit-gradient(linear,0 0,0 100%,from(#62c462),to(#51a351));background-image:-webkit-linear-gradient(top,#62c462,#51a351);background-image:-o-linear-gradient(top,#62c462,#51a351);background-image:linear-gradient(to bottom,#62c462,#51a351);background-image:-moz-linear-gradient(top,#62c462,#51a351);background-repeat:repeat-x;border-color:#51a351 #51a351 #387038;border-color:rgba(0,0,0,0.1) rgba(0,0,0,0.1) rgba(0,0,0,0.25);filter:progid:dximagetransform.microsoft.gradient(startColorstr='#ff62c462',endColorstr='#ff51a351',GradientType=0);filter:progid:dximagetransform.microsoft.gradient(enabled=false)}.btn-success:hover,.btn-success:active,.btn-success.active,.btn-success.disabled,.btn-success[disabled]{color:#fff;background-color:#51a351;*background-color:#499249}.btn-success:active,.btn-success.active{background-color:#408140 \9}.btn-info{color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.25);background-color:#49afcd;*background-color:#2f96b4;background-image:-webkit-gradient(linear,0 0,0 100%,from(#5bc0de),to(#2f96b4));background-image:-webkit-linear-gradient(top,#5bc0de,#2f96b4);background-image:-o-linear-gradient(top,#5bc0de,#2f96b4);background-image:linear-gradient(to bottom,#5bc0de,#2f96b4);background-image:-moz-linear-gradient(top,#5bc0de,#2f96b4);background-repeat:repeat-x;border-color:#2f96b4 #2f96b4 #1f6377;border-color:rgba(0,0,0,0.1) rgba(0,0,0,0.1) 
rgba(0,0,0,0.25);filter:progid:dximagetransform.microsoft.gradient(startColorstr='#ff5bc0de',endColorstr='#ff2f96b4',GradientType=0);filter:progid:dximagetransform.microsoft.gradient(enabled=false)}.btn-info:hover,.btn-info:active,.btn-info.active,.btn-info.disabled,.btn-info[disabled]{color:#fff;background-color:#2f96b4;*background-color:#2a85a0}.btn-info:active,.btn-info.active{background-color:#24748c \9}.btn-inverse{color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.25);background-color:#363636;*background-color:#222;background-image:-webkit-gradient(linear,0 0,0 100%,from(#444),to(#222));background-image:-webkit-linear-gradient(top,#444,#222);background-image:-o-linear-gradient(top,#444,#222);background-image:linear-gradient(to bottom,#444,#222);background-image:-moz-linear-gradient(top,#444,#222);background-repeat:repeat-x;border-color:#222 #222 #000;border-color:rgba(0,0,0,0.1) rgba(0,0,0,0.1) rgba(0,0,0,0.25);filter:progid:dximagetransform.microsoft.gradient(startColorstr='#ff444444',endColorstr='#ff222222',GradientType=0);filter:progid:dximagetransform.microsoft.gradient(enabled=false)}.btn-inverse:hover,.btn-inverse:active,.btn-inverse.active,.btn-inverse.disabled,.btn-inverse[disabled]{color:#fff;background-color:#222;*background-color:#151515}.btn-inverse:active,.btn-inverse.active{background-color:#080808 \9}button.btn,input[type="submit"].btn{*padding-top:3px;*padding-bottom:3px}button.btn::-moz-focus-inner,input[type="submit"].btn::-moz-focus-inner{padding:0;border:0}button.btn.btn-large,input[type="submit"].btn.btn-large{*padding-top:7px;*padding-bottom:7px}button.btn.btn-small,input[type="submit"].btn.btn-small{*padding-top:3px;*padding-bottom:3px}button.btn.btn-mini,input[type="submit"].btn.btn-mini{*padding-top:1px;*padding-bottom:1px}.btn-link,.btn-link:active{background-color:transparent;background-image:none;-webkit-box-shadow:none;-moz-box-shadow:none;box-shadow:none}.btn-link{color:#08c;cursor:pointer;border-color:transparent;-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.btn-link:hover{color:#005580;text-decoration:underline;background-color:transparent}.btn-group{position:relative;*margin-left:.3em;font-size:0;white-space:nowrap}.btn-group:first-child{*margin-left:0}.btn-group+.btn-group{margin-left:5px}.btn-toolbar{margin-top:10px;margin-bottom:10px;font-size:0}.btn-toolbar .btn-group{display:inline-block;*display:inline;*zoom:1}.btn-toolbar .btn+.btn,.btn-toolbar .btn-group+.btn,.btn-toolbar 
.btn+.btn-group{margin-left:5px}.btn-group>.btn{position:relative;-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.btn-group>.btn+.btn{margin-left:-1px}.btn-group>.btn,.btn-group>.dropdown-menu{font-size:14px}.btn-group>.btn-mini{font-size:11px}.btn-group>.btn-small{font-size:12px}.btn-group>.btn-large{font-size:16px}.btn-group>.btn:first-child{margin-left:0;-webkit-border-bottom-left-radius:4px;border-bottom-left-radius:4px;-webkit-border-top-left-radius:4px;border-top-left-radius:4px;-moz-border-radius-bottomleft:4px;-moz-border-radius-topleft:4px}.btn-group>.btn:last-child,.btn-group>.dropdown-toggle{-webkit-border-top-right-radius:4px;border-top-right-radius:4px;-webkit-border-bottom-right-radius:4px;border-bottom-right-radius:4px;-moz-border-radius-topright:4px;-moz-border-radius-bottomright:4px}.btn-group>.btn.large:first-child{margin-left:0;-webkit-border-bottom-left-radius:6px;border-bottom-left-radius:6px;-webkit-border-top-left-radius:6px;border-top-left-radius:6px;-moz-border-radius-bottomleft:6px;-moz-border-radius-topleft:6px}.btn-group>.btn.large:last-child,.btn-group>.large.dropdown-toggle{-webkit-border-top-right-radius:6px;border-top-right-radius:6px;-webkit-border-bottom-right-radius:6px;border-bottom-right-radius:6px;-moz-border-radius-topright:6px;-moz-border-radius-bottomright:6px}.btn-group>.btn:hover,.btn-group>.btn:focus,.btn-group>.btn:active,.btn-group>.btn.active{z-index:2}.btn-group .dropdown-toggle:active,.btn-group.open .dropdown-toggle{outline:0}.btn-group>.btn+.dropdown-toggle{*padding-top:5px;padding-right:8px;*padding-bottom:5px;padding-left:8px;-webkit-box-shadow:inset 1px 0 0 rgba(255,255,255,0.125),inset 0 1px 0 rgba(255,255,255,0.2),0 1px 2px rgba(0,0,0,0.05);-moz-box-shadow:inset 1px 0 0 rgba(255,255,255,0.125),inset 0 1px 0 rgba(255,255,255,0.2),0 1px 2px rgba(0,0,0,0.05);box-shadow:inset 1px 0 0 rgba(255,255,255,0.125),inset 0 1px 0 rgba(255,255,255,0.2),0 1px 2px rgba(0,0,0,0.05)}.btn-group>.btn-mini+.dropdown-toggle{*padding-top:2px;padding-right:5px;*padding-bottom:2px;padding-left:5px}.btn-group>.btn-small+.dropdown-toggle{*padding-top:5px;*padding-bottom:4px}.btn-group>.btn-large+.dropdown-toggle{*padding-top:7px;padding-right:12px;*padding-bottom:7px;padding-left:12px}.btn-group.open .dropdown-toggle{background-image:none;-webkit-box-shadow:inset 0 2px 4px rgba(0,0,0,0.15),0 1px 2px rgba(0,0,0,0.05);-moz-box-shadow:inset 0 2px 4px rgba(0,0,0,0.15),0 1px 2px rgba(0,0,0,0.05);box-shadow:inset 0 2px 4px rgba(0,0,0,0.15),0 1px 2px rgba(0,0,0,0.05)}.btn-group.open .btn.dropdown-toggle{background-color:#e6e6e6}.btn-group.open .btn-primary.dropdown-toggle{background-color:#04c}.btn-group.open .btn-warning.dropdown-toggle{background-color:#f89406}.btn-group.open .btn-danger.dropdown-toggle{background-color:#bd362f}.btn-group.open .btn-success.dropdown-toggle{background-color:#51a351}.btn-group.open .btn-info.dropdown-toggle{background-color:#2f96b4}.btn-group.open .btn-inverse.dropdown-toggle{background-color:#222}.btn .caret{margin-top:8px;margin-left:0}.btn-mini .caret,.btn-small .caret,.btn-large .caret{margin-top:6px}.btn-large .caret{border-top-width:5px;border-right-width:5px;border-left-width:5px}.dropup .btn-large .caret{border-top:0;border-bottom:5px solid #000}.btn-primary .caret,.btn-warning .caret,.btn-danger .caret,.btn-info .caret,.btn-success .caret,.btn-inverse .caret{border-top-color:#fff;border-bottom-color:#fff}.btn-group-vertical{display:inline-block;*display:inline;*zoom:1}.btn-group-vertical 
.btn{display:block;float:none;width:100%;-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.btn-group-vertical .btn+.btn{margin-top:-1px;margin-left:0}.btn-group-vertical .btn:first-child{-webkit-border-radius:4px 4px 0 0;-moz-border-radius:4px 4px 0 0;border-radius:4px 4px 0 0}.btn-group-vertical .btn:last-child{-webkit-border-radius:0 0 4px 4px;-moz-border-radius:0 0 4px 4px;border-radius:0 0 4px 4px}.btn-group-vertical .btn-large:first-child{-webkit-border-radius:6px 6px 0 0;-moz-border-radius:6px 6px 0 0;border-radius:6px 6px 0 0}.btn-group-vertical .btn-large:last-child{-webkit-border-radius:0 0 6px 6px;-moz-border-radius:0 0 6px 6px;border-radius:0 0 6px 6px}.alert{padding:8px 35px 8px 14px;margin-bottom:20px;color:#c09853;text-shadow:0 1px 0 rgba(255,255,255,0.5);background-color:#fcf8e3;border:1px solid #fbeed5;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}.alert h4{margin:0}.alert .close{position:relative;top:-2px;right:-21px;line-height:20px}.alert-success{color:#468847;background-color:#dff0d8;border-color:#d6e9c6}.alert-danger,.alert-error{color:#b94a48;background-color:#f2dede;border-color:#eed3d7}.alert-info{color:#3a87ad;background-color:#d9edf7;border-color:#bce8f1}.alert-block{padding-top:14px;padding-bottom:14px}.alert-block>p,.alert-block>ul{margin-bottom:0}.alert-block p+p{margin-top:5px}.nav{margin-bottom:20px;margin-left:0;list-style:none}.nav>li>a{display:block}.nav>li>a:hover{text-decoration:none;background-color:#eee}.nav>.pull-right{float:right}.nav-header{display:block;padding:3px 15px;font-size:11px;font-weight:bold;line-height:20px;color:#999;text-shadow:0 1px 0 rgba(255,255,255,0.5);text-transform:uppercase}.nav li+.nav-header{margin-top:9px}.nav-list{padding-right:15px;padding-left:15px;margin-bottom:0}.nav-list>li>a,.nav-list .nav-header{margin-right:-15px;margin-left:-15px;text-shadow:0 1px 0 rgba(255,255,255,0.5)}.nav-list>li>a{padding:3px 15px}.nav-list>.active>a,.nav-list>.active>a:hover{color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.2);background-color:#08c}.nav-list [class^="icon-"]{margin-right:2px}.nav-list .divider{*width:100%;height:1px;margin:9px 1px;*margin:-5px 0 5px;overflow:hidden;background-color:#e5e5e5;border-bottom:1px solid #fff}.nav-tabs,.nav-pills{*zoom:1}.nav-tabs:before,.nav-pills:before,.nav-tabs:after,.nav-pills:after{display:table;line-height:0;content:""}.nav-tabs:after,.nav-pills:after{clear:both}.nav-tabs>li,.nav-pills>li{float:left}.nav-tabs>li>a,.nav-pills>li>a{padding-right:12px;padding-left:12px;margin-right:2px;line-height:14px}.nav-tabs{border-bottom:1px solid #ddd}.nav-tabs>li{margin-bottom:-1px}.nav-tabs>li>a{padding-top:8px;padding-bottom:8px;line-height:20px;border:1px solid transparent;-webkit-border-radius:4px 4px 0 0;-moz-border-radius:4px 4px 0 0;border-radius:4px 4px 0 0}.nav-tabs>li>a:hover{border-color:#eee #eee #ddd}.nav-tabs>.active>a,.nav-tabs>.active>a:hover{color:#555;cursor:default;background-color:#fff;border:1px solid #ddd;border-bottom-color:transparent}.nav-pills>li>a{padding-top:8px;padding-bottom:8px;margin-top:2px;margin-bottom:2px;-webkit-border-radius:5px;-moz-border-radius:5px;border-radius:5px}.nav-pills>.active>a,.nav-pills>.active>a:hover{color:#fff;background-color:#08c}.nav-stacked>li{float:none}.nav-stacked>li>a{margin-right:0}.nav-tabs.nav-stacked{border-bottom:0}.nav-tabs.nav-stacked>li>a{border:1px solid 
#ddd;-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.nav-tabs.nav-stacked>li:first-child>a{-webkit-border-top-right-radius:4px;border-top-right-radius:4px;-webkit-border-top-left-radius:4px;border-top-left-radius:4px;-moz-border-radius-topright:4px;-moz-border-radius-topleft:4px}.nav-tabs.nav-stacked>li:last-child>a{-webkit-border-bottom-right-radius:4px;border-bottom-right-radius:4px;-webkit-border-bottom-left-radius:4px;border-bottom-left-radius:4px;-moz-border-radius-bottomright:4px;-moz-border-radius-bottomleft:4px}.nav-tabs.nav-stacked>li>a:hover{z-index:2;border-color:#ddd}.nav-pills.nav-stacked>li>a{margin-bottom:3px}.nav-pills.nav-stacked>li:last-child>a{margin-bottom:1px}.nav-tabs .dropdown-menu{-webkit-border-radius:0 0 6px 6px;-moz-border-radius:0 0 6px 6px;border-radius:0 0 6px 6px}.nav-pills .dropdown-menu{-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px}.nav .dropdown-toggle .caret{margin-top:6px;border-top-color:#08c;border-bottom-color:#08c}.nav .dropdown-toggle:hover .caret{border-top-color:#005580;border-bottom-color:#005580}.nav-tabs .dropdown-toggle .caret{margin-top:8px}.nav .active .dropdown-toggle .caret{border-top-color:#fff;border-bottom-color:#fff}.nav-tabs .active .dropdown-toggle .caret{border-top-color:#555;border-bottom-color:#555}.nav>.dropdown.active>a:hover{cursor:pointer}.nav-tabs .open .dropdown-toggle,.nav-pills .open .dropdown-toggle,.nav>li.dropdown.open.active>a:hover{color:#fff;background-color:#999;border-color:#999}.nav li.dropdown.open .caret,.nav li.dropdown.open.active .caret,.nav li.dropdown.open a:hover .caret{border-top-color:#fff;border-bottom-color:#fff;opacity:1;filter:alpha(opacity=100)}.tabs-stacked .open>a:hover{border-color:#999}.tabbable{*zoom:1}.tabbable:before,.tabbable:after{display:table;line-height:0;content:""}.tabbable:after{clear:both}.tab-content{overflow:auto}.tabs-below>.nav-tabs,.tabs-right>.nav-tabs,.tabs-left>.nav-tabs{border-bottom:0}.tab-content>.tab-pane,.pill-content>.pill-pane{display:none}.tab-content>.active,.pill-content>.active{display:block}.tabs-below>.nav-tabs{border-top:1px solid #ddd}.tabs-below>.nav-tabs>li{margin-top:-1px;margin-bottom:0}.tabs-below>.nav-tabs>li>a{-webkit-border-radius:0 0 4px 4px;-moz-border-radius:0 0 4px 4px;border-radius:0 0 4px 4px}.tabs-below>.nav-tabs>li>a:hover{border-top-color:#ddd;border-bottom-color:transparent}.tabs-below>.nav-tabs>.active>a,.tabs-below>.nav-tabs>.active>a:hover{border-color:transparent #ddd #ddd #ddd}.tabs-left>.nav-tabs>li,.tabs-right>.nav-tabs>li{float:none}.tabs-left>.nav-tabs>li>a,.tabs-right>.nav-tabs>li>a{min-width:74px;margin-right:0;margin-bottom:3px}.tabs-left>.nav-tabs{float:left;margin-right:19px;border-right:1px solid #ddd}.tabs-left>.nav-tabs>li>a{margin-right:-1px;-webkit-border-radius:4px 0 0 4px;-moz-border-radius:4px 0 0 4px;border-radius:4px 0 0 4px}.tabs-left>.nav-tabs>li>a:hover{border-color:#eee #ddd #eee #eee}.tabs-left>.nav-tabs .active>a,.tabs-left>.nav-tabs .active>a:hover{border-color:#ddd transparent #ddd #ddd;*border-right-color:#fff}.tabs-right>.nav-tabs{float:right;margin-left:19px;border-left:1px solid #ddd}.tabs-right>.nav-tabs>li>a{margin-left:-1px;-webkit-border-radius:0 4px 4px 0;-moz-border-radius:0 4px 4px 0;border-radius:0 4px 4px 0}.tabs-right>.nav-tabs>li>a:hover{border-color:#eee #eee #eee #ddd}.tabs-right>.nav-tabs .active>a,.tabs-right>.nav-tabs .active>a:hover{border-color:#ddd #ddd #ddd 
transparent;*border-left-color:#fff}.nav>.disabled>a{color:#999}.nav>.disabled>a:hover{text-decoration:none;cursor:default;background-color:transparent}.navbar{*position:relative;*z-index:2;margin-bottom:20px;overflow:visible;color:#555}.navbar-inner{min-height:40px;padding-right:20px;padding-left:20px;background-color:#fafafa;background-image:-moz-linear-gradient(top,#fff,#f2f2f2);background-image:-webkit-gradient(linear,0 0,0 100%,from(#fff),to(#f2f2f2));background-image:-webkit-linear-gradient(top,#fff,#f2f2f2);background-image:-o-linear-gradient(top,#fff,#f2f2f2);background-image:linear-gradient(to bottom,#fff,#f2f2f2);background-repeat:repeat-x;border:1px solid #d4d4d4;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px;filter:progid:dximagetransform.microsoft.gradient(startColorstr='#ffffffff',endColorstr='#fff2f2f2',GradientType=0);-webkit-box-shadow:0 1px 4px rgba(0,0,0,0.065);-moz-box-shadow:0 1px 4px rgba(0,0,0,0.065);box-shadow:0 1px 4px rgba(0,0,0,0.065)}.navbar .container{width:auto}.nav-collapse.collapse{height:auto}.navbar .brand{display:block;float:left;padding:10px 20px 10px;margin-left:-20px;font-size:20px;font-weight:200;color:#555;text-shadow:0 1px 0 #fff}.navbar .brand:hover{text-decoration:none}.navbar-text{margin-bottom:0;line-height:40px}.navbar-link{color:#555}.navbar-link:hover{color:#333}.navbar .divider-vertical{height:40px;margin:0 9px;border-right:1px solid #fff;border-left:1px solid #f2f2f2}.navbar .btn,.navbar .btn-group{margin-top:6px}.navbar .btn-group .btn{margin:0}.navbar-form{margin-bottom:0;*zoom:1}.navbar-form:before,.navbar-form:after{display:table;line-height:0;content:""}.navbar-form:after{clear:both}.navbar-form input,.navbar-form select,.navbar-form .radio,.navbar-form .checkbox{margin-top:5px}.navbar-form input,.navbar-form select,.navbar-form .btn{display:inline-block;margin-bottom:0}.navbar-form input[type="image"],.navbar-form input[type="checkbox"],.navbar-form input[type="radio"]{margin-top:3px}.navbar-form .input-append,.navbar-form .input-prepend{margin-top:6px;white-space:nowrap}.navbar-form .input-append input,.navbar-form .input-prepend input{margin-top:0}.navbar-search{position:relative;float:left;margin-top:5px;margin-bottom:0}.navbar-search .search-query{padding:4px 14px;margin-bottom:0;font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:13px;font-weight:normal;line-height:1;-webkit-border-radius:15px;-moz-border-radius:15px;border-radius:15px}.navbar-static-top{position:static;width:100%;margin-bottom:0}.navbar-static-top .navbar-inner{-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.navbar-fixed-top,.navbar-fixed-bottom{position:fixed;right:0;left:0;z-index:1030;margin-bottom:0}.navbar-fixed-top .navbar-inner,.navbar-fixed-bottom .navbar-inner,.navbar-static-top .navbar-inner{border:0}.navbar-fixed-top .navbar-inner,.navbar-fixed-bottom .navbar-inner{padding-right:0;padding-left:0;-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.navbar-static-top .container,.navbar-fixed-top .container,.navbar-fixed-bottom .container{width:940px}.navbar-fixed-top{top:0}.navbar-fixed-top .navbar-inner,.navbar-static-top .navbar-inner{-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,0.1),0 1px 10px rgba(0,0,0,0.1);-moz-box-shadow:inset 0 -1px 0 rgba(0,0,0,0.1),0 1px 10px rgba(0,0,0,0.1);box-shadow:inset 0 -1px 0 rgba(0,0,0,0.1),0 1px 10px rgba(0,0,0,0.1)}.navbar-fixed-bottom{bottom:0}.navbar-fixed-bottom .navbar-inner{-webkit-box-shadow:inset 0 1px 0 rgba(0,0,0,0.1),0 -1px 10px 
rgba(0,0,0,0.1);-moz-box-shadow:inset 0 1px 0 rgba(0,0,0,0.1),0 -1px 10px rgba(0,0,0,0.1);box-shadow:inset 0 1px 0 rgba(0,0,0,0.1),0 -1px 10px rgba(0,0,0,0.1)}.navbar .nav{position:relative;left:0;display:block;float:left;margin:0 10px 0 0}.navbar .nav.pull-right{float:right}.navbar .nav>li{float:left}.navbar .nav>li>a{float:none;padding:10px 15px 10px;color:#555;text-decoration:none;text-shadow:0 1px 0 #fff}.navbar .nav .dropdown-toggle .caret{margin-top:8px}.navbar .nav>li>a:focus,.navbar .nav>li>a:hover{color:#333;text-decoration:none;background-color:transparent}.navbar .nav>.active>a,.navbar .nav>.active>a:hover,.navbar .nav>.active>a:focus{color:#555;text-decoration:none;background-color:#e5e5e5;-webkit-box-shadow:inset 0 3px 8px rgba(0,0,0,0.125);-moz-box-shadow:inset 0 3px 8px rgba(0,0,0,0.125);box-shadow:inset 0 3px 8px rgba(0,0,0,0.125)}.navbar .btn-navbar{display:none;float:right;padding:7px 10px;margin-right:5px;margin-left:5px;color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.25);background-color:#ededed;*background-color:#e5e5e5;background-image:-webkit-gradient(linear,0 0,0 100%,from(#f2f2f2),to(#e5e5e5));background-image:-webkit-linear-gradient(top,#f2f2f2,#e5e5e5);background-image:-o-linear-gradient(top,#f2f2f2,#e5e5e5);background-image:linear-gradient(to bottom,#f2f2f2,#e5e5e5);background-image:-moz-linear-gradient(top,#f2f2f2,#e5e5e5);background-repeat:repeat-x;border-color:#e5e5e5 #e5e5e5 #bfbfbf;border-color:rgba(0,0,0,0.1) rgba(0,0,0,0.1) rgba(0,0,0,0.25);filter:progid:dximagetransform.microsoft.gradient(startColorstr='#fff2f2f2',endColorstr='#ffe5e5e5',GradientType=0);filter:progid:dximagetransform.microsoft.gradient(enabled=false);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,0.1),0 1px 0 rgba(255,255,255,0.075);-moz-box-shadow:inset 0 1px 0 rgba(255,255,255,0.1),0 1px 0 rgba(255,255,255,0.075);box-shadow:inset 0 1px 0 rgba(255,255,255,0.1),0 1px 0 rgba(255,255,255,0.075)}.navbar .btn-navbar:hover,.navbar .btn-navbar:active,.navbar .btn-navbar.active,.navbar .btn-navbar.disabled,.navbar .btn-navbar[disabled]{color:#fff;background-color:#e5e5e5;*background-color:#d9d9d9}.navbar .btn-navbar:active,.navbar .btn-navbar.active{background-color:#ccc \9}.navbar .btn-navbar .icon-bar{display:block;width:18px;height:2px;background-color:#f5f5f5;-webkit-border-radius:1px;-moz-border-radius:1px;border-radius:1px;-webkit-box-shadow:0 1px 0 rgba(0,0,0,0.25);-moz-box-shadow:0 1px 0 rgba(0,0,0,0.25);box-shadow:0 1px 0 rgba(0,0,0,0.25)}.btn-navbar .icon-bar+.icon-bar{margin-top:3px}.navbar .nav>li>.dropdown-menu:before{position:absolute;top:-7px;left:9px;display:inline-block;border-right:7px solid transparent;border-bottom:7px solid #ccc;border-left:7px solid transparent;border-bottom-color:rgba(0,0,0,0.2);content:''}.navbar .nav>li>.dropdown-menu:after{position:absolute;top:-6px;left:10px;display:inline-block;border-right:6px solid transparent;border-bottom:6px solid #fff;border-left:6px solid transparent;content:''}.navbar-fixed-bottom .nav>li>.dropdown-menu:before{top:auto;bottom:-7px;border-top:7px solid #ccc;border-bottom:0;border-top-color:rgba(0,0,0,0.2)}.navbar-fixed-bottom .nav>li>.dropdown-menu:after{top:auto;bottom:-6px;border-top:6px solid #fff;border-bottom:0}.navbar .nav li.dropdown.open>.dropdown-toggle,.navbar .nav li.dropdown.active>.dropdown-toggle,.navbar .nav li.dropdown.open.active>.dropdown-toggle{color:#555;background-color:#e5e5e5}.navbar .nav li.dropdown>.dropdown-toggle .caret{border-top-color:#555;border-bottom-color:#555}.navbar .nav 
li.dropdown.open>.dropdown-toggle .caret,.navbar .nav li.dropdown.active>.dropdown-toggle .caret,.navbar .nav li.dropdown.open.active>.dropdown-toggle .caret{border-top-color:#555;border-bottom-color:#555}.navbar .pull-right>li>.dropdown-menu,.navbar .nav>li>.dropdown-menu.pull-right{right:0;left:auto}.navbar .pull-right>li>.dropdown-menu:before,.navbar .nav>li>.dropdown-menu.pull-right:before{right:12px;left:auto}.navbar .pull-right>li>.dropdown-menu:after,.navbar .nav>li>.dropdown-menu.pull-right:after{right:13px;left:auto}.navbar .pull-right>li>.dropdown-menu .dropdown-menu,.navbar .nav>li>.dropdown-menu.pull-right .dropdown-menu{right:100%;left:auto;margin-right:-1px;margin-left:0;-webkit-border-radius:6px 0 6px 6px;-moz-border-radius:6px 0 6px 6px;border-radius:6px 0 6px 6px}.navbar-inverse{color:#999}.navbar-inverse .navbar-inner{background-color:#1b1b1b;background-image:-moz-linear-gradient(top,#222,#111);background-image:-webkit-gradient(linear,0 0,0 100%,from(#222),to(#111));background-image:-webkit-linear-gradient(top,#222,#111);background-image:-o-linear-gradient(top,#222,#111);background-image:linear-gradient(to bottom,#222,#111);background-repeat:repeat-x;border-color:#252525;filter:progid:dximagetransform.microsoft.gradient(startColorstr='#ff222222',endColorstr='#ff111111',GradientType=0)}.navbar-inverse .brand,.navbar-inverse .nav>li>a{color:#999;text-shadow:0 -1px 0 rgba(0,0,0,0.25)}.navbar-inverse .brand:hover,.navbar-inverse .nav>li>a:hover{color:#fff}.navbar-inverse .nav>li>a:focus,.navbar-inverse .nav>li>a:hover{color:#fff;background-color:transparent}.navbar-inverse .nav .active>a,.navbar-inverse .nav .active>a:hover,.navbar-inverse .nav .active>a:focus{color:#fff;background-color:#111}.navbar-inverse .navbar-link{color:#999}.navbar-inverse .navbar-link:hover{color:#fff}.navbar-inverse .divider-vertical{border-right-color:#222;border-left-color:#111}.navbar-inverse .nav li.dropdown.open>.dropdown-toggle,.navbar-inverse .nav li.dropdown.active>.dropdown-toggle,.navbar-inverse .nav li.dropdown.open.active>.dropdown-toggle{color:#fff;background-color:#111}.navbar-inverse .nav li.dropdown>.dropdown-toggle .caret{border-top-color:#999;border-bottom-color:#999}.navbar-inverse .nav li.dropdown.open>.dropdown-toggle .caret,.navbar-inverse .nav li.dropdown.active>.dropdown-toggle .caret,.navbar-inverse .nav li.dropdown.open.active>.dropdown-toggle .caret{border-top-color:#fff;border-bottom-color:#fff}.navbar-inverse .navbar-search .search-query{color:#fff;background-color:#515151;border-color:#111;-webkit-box-shadow:inset 0 1px 2px rgba(0,0,0,0.1),0 1px 0 rgba(255,255,255,0.15);-moz-box-shadow:inset 0 1px 2px rgba(0,0,0,0.1),0 1px 0 rgba(255,255,255,0.15);box-shadow:inset 0 1px 2px rgba(0,0,0,0.1),0 1px 0 rgba(255,255,255,0.15);-webkit-transition:none;-moz-transition:none;-o-transition:none;transition:none}.navbar-inverse .navbar-search .search-query:-moz-placeholder{color:#ccc}.navbar-inverse .navbar-search .search-query:-ms-input-placeholder{color:#ccc}.navbar-inverse .navbar-search .search-query::-webkit-input-placeholder{color:#ccc}.navbar-inverse .navbar-search .search-query:focus,.navbar-inverse .navbar-search .search-query.focused{padding:5px 15px;color:#333;text-shadow:0 1px 0 #fff;background-color:#fff;border:0;outline:0;-webkit-box-shadow:0 0 3px rgba(0,0,0,0.15);-moz-box-shadow:0 0 3px rgba(0,0,0,0.15);box-shadow:0 0 3px rgba(0,0,0,0.15)}.navbar-inverse .btn-navbar{color:#fff;text-shadow:0 -1px 0 
rgba(0,0,0,0.25);background-color:#0e0e0e;*background-color:#040404;background-image:-webkit-gradient(linear,0 0,0 100%,from(#151515),to(#040404));background-image:-webkit-linear-gradient(top,#151515,#040404);background-image:-o-linear-gradient(top,#151515,#040404);background-image:linear-gradient(to bottom,#151515,#040404);background-image:-moz-linear-gradient(top,#151515,#040404);background-repeat:repeat-x;border-color:#040404 #040404 #000;border-color:rgba(0,0,0,0.1) rgba(0,0,0,0.1) rgba(0,0,0,0.25);filter:progid:dximagetransform.microsoft.gradient(startColorstr='#ff151515',endColorstr='#ff040404',GradientType=0);filter:progid:dximagetransform.microsoft.gradient(enabled=false)}.navbar-inverse .btn-navbar:hover,.navbar-inverse .btn-navbar:active,.navbar-inverse .btn-navbar.active,.navbar-inverse .btn-navbar.disabled,.navbar-inverse .btn-navbar[disabled]{color:#fff;background-color:#040404;*background-color:#000}.navbar-inverse .btn-navbar:active,.navbar-inverse .btn-navbar.active{background-color:#000 \9}.breadcrumb{padding:8px 15px;margin:0 0 20px;list-style:none;background-color:#f5f5f5;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}.breadcrumb li{display:inline-block;*display:inline;text-shadow:0 1px 0 #fff;*zoom:1}.breadcrumb .divider{padding:0 5px;color:#ccc}.breadcrumb .active{color:#999}.pagination{height:40px;margin:20px 0}.pagination ul{display:inline-block;*display:inline;margin-bottom:0;margin-left:0;-webkit-border-radius:3px;-moz-border-radius:3px;border-radius:3px;*zoom:1;-webkit-box-shadow:0 1px 2px rgba(0,0,0,0.05);-moz-box-shadow:0 1px 2px rgba(0,0,0,0.05);box-shadow:0 1px 2px rgba(0,0,0,0.05)}.pagination li{display:inline}.pagination a,.pagination span{float:left;padding:0 14px;line-height:38px;text-decoration:none;background-color:#fff;border:1px solid #ddd;border-left-width:0}.pagination a:hover,.pagination .active a,.pagination .active span{background-color:#f5f5f5}.pagination .active a,.pagination .active span{color:#999;cursor:default}.pagination .disabled span,.pagination .disabled a,.pagination .disabled a:hover{color:#999;cursor:default;background-color:transparent}.pagination li:first-child a,.pagination li:first-child span{border-left-width:1px;-webkit-border-radius:3px 0 0 3px;-moz-border-radius:3px 0 0 3px;border-radius:3px 0 0 3px}.pagination li:last-child a,.pagination li:last-child span{-webkit-border-radius:0 3px 3px 0;-moz-border-radius:0 3px 3px 0;border-radius:0 3px 3px 0}.pagination-centered{text-align:center}.pagination-right{text-align:right}.pager{margin:20px 0;text-align:center;list-style:none;*zoom:1}.pager:before,.pager:after{display:table;line-height:0;content:""}.pager:after{clear:both}.pager li{display:inline}.pager a{display:inline-block;padding:5px 14px;background-color:#fff;border:1px solid #ddd;-webkit-border-radius:15px;-moz-border-radius:15px;border-radius:15px}.pager a:hover{text-decoration:none;background-color:#f5f5f5}.pager .next a{float:right}.pager .previous a{float:left}.pager .disabled a,.pager .disabled a:hover{color:#999;cursor:default;background-color:#fff}.modal-open .dropdown-menu{z-index:2050}.modal-open .dropdown.open{*z-index:2050}.modal-open .popover{z-index:2060}.modal-open .tooltip{z-index:2080}.modal-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1040;background-color:#000}.modal-backdrop.fade{opacity:0}.modal-backdrop,.modal-backdrop.fade.in{opacity:.8;filter:alpha(opacity=80)}.modal{position:fixed;top:50%;left:50%;z-index:1050;width:560px;margin:-250px 0 0 
-280px;overflow:auto;background-color:#fff;border:1px solid #999;border:1px solid rgba(0,0,0,0.3);*border:1px solid #999;-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px;-webkit-box-shadow:0 3px 7px rgba(0,0,0,0.3);-moz-box-shadow:0 3px 7px rgba(0,0,0,0.3);box-shadow:0 3px 7px rgba(0,0,0,0.3);-webkit-background-clip:padding-box;-moz-background-clip:padding-box;background-clip:padding-box}.modal.fade{top:-25%;-webkit-transition:opacity .3s linear,top .3s ease-out;-moz-transition:opacity .3s linear,top .3s ease-out;-o-transition:opacity .3s linear,top .3s ease-out;transition:opacity .3s linear,top .3s ease-out}.modal.fade.in{top:50%}.modal-header{padding:9px 15px;border-bottom:1px solid #eee}.modal-header .close{margin-top:2px}.modal-header h3{margin:0;line-height:30px}.modal-body{max-height:400px;padding:15px;overflow-y:auto}.modal-form{margin-bottom:0}.modal-footer{padding:14px 15px 15px;margin-bottom:0;text-align:right;background-color:#f5f5f5;border-top:1px solid #ddd;-webkit-border-radius:0 0 6px 6px;-moz-border-radius:0 0 6px 6px;border-radius:0 0 6px 6px;*zoom:1;-webkit-box-shadow:inset 0 1px 0 #fff;-moz-box-shadow:inset 0 1px 0 #fff;box-shadow:inset 0 1px 0 #fff}.modal-footer:before,.modal-footer:after{display:table;line-height:0;content:""}.modal-footer:after{clear:both}.modal-footer .btn+.btn{margin-bottom:0;margin-left:5px}.modal-footer .btn-group .btn+.btn{margin-left:-1px}.tooltip{position:absolute;z-index:1030;display:block;padding:5px;font-size:11px;opacity:0;filter:alpha(opacity=0);visibility:visible}.tooltip.in{opacity:.8;filter:alpha(opacity=80)}.tooltip.top{margin-top:-3px}.tooltip.right{margin-left:3px}.tooltip.bottom{margin-top:3px}.tooltip.left{margin-left:-3px}.tooltip-inner{max-width:200px;padding:3px 8px;color:#fff;text-align:center;text-decoration:none;background-color:#000;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}.tooltip-arrow{position:absolute;width:0;height:0;border-color:transparent;border-style:solid}.tooltip.top .tooltip-arrow{bottom:0;left:50%;margin-left:-5px;border-top-color:#000;border-width:5px 5px 0}.tooltip.right .tooltip-arrow{top:50%;left:0;margin-top:-5px;border-right-color:#000;border-width:5px 5px 5px 0}.tooltip.left .tooltip-arrow{top:50%;right:0;margin-top:-5px;border-left-color:#000;border-width:5px 0 5px 5px}.tooltip.bottom .tooltip-arrow{top:0;left:50%;margin-left:-5px;border-bottom-color:#000;border-width:0 5px 5px}.popover{position:absolute;top:0;left:0;z-index:1010;display:none;width:236px;padding:1px;background-color:#fff;border:1px solid #ccc;border:1px solid rgba(0,0,0,0.2);-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px;-webkit-box-shadow:0 5px 10px rgba(0,0,0,0.2);-moz-box-shadow:0 5px 10px rgba(0,0,0,0.2);box-shadow:0 5px 10px rgba(0,0,0,0.2);-webkit-background-clip:padding-box;-moz-background-clip:padding;background-clip:padding-box}.popover.top{margin-bottom:10px}.popover.right{margin-left:10px}.popover.bottom{margin-top:10px}.popover.left{margin-right:10px}.popover-title{padding:8px 14px;margin:0;font-size:14px;font-weight:normal;line-height:18px;background-color:#f7f7f7;border-bottom:1px solid #ebebeb;-webkit-border-radius:5px 5px 0 0;-moz-border-radius:5px 5px 0 0;border-radius:5px 5px 0 0}.popover-content{padding:9px 14px}.popover-content p,.popover-content ul,.popover-content ol{margin-bottom:0}.popover .arrow,.popover .arrow:after{position:absolute;display:inline-block;width:0;height:0;border-color:transparent;border-style:solid}.popover 
.arrow:after{z-index:-1;content:""}.popover.top .arrow{bottom:-10px;left:50%;margin-left:-10px;border-top-color:#fff;border-width:10px 10px 0}.popover.top .arrow:after{bottom:-1px;left:-11px;border-top-color:rgba(0,0,0,0.25);border-width:11px 11px 0}.popover.right .arrow{top:50%;left:-10px;margin-top:-10px;border-right-color:#fff;border-width:10px 10px 10px 0}.popover.right .arrow:after{bottom:-11px;left:-1px;border-right-color:rgba(0,0,0,0.25);border-width:11px 11px 11px 0}.popover.bottom .arrow{top:-10px;left:50%;margin-left:-10px;border-bottom-color:#fff;border-width:0 10px 10px}.popover.bottom .arrow:after{top:-1px;left:-11px;border-bottom-color:rgba(0,0,0,0.25);border-width:0 11px 11px}.popover.left .arrow{top:50%;right:-10px;margin-top:-10px;border-left-color:#fff;border-width:10px 0 10px 10px}.popover.left .arrow:after{right:-1px;bottom:-11px;border-left-color:rgba(0,0,0,0.25);border-width:11px 0 11px 11px}.thumbnails{margin-left:-20px;list-style:none;*zoom:1}.thumbnails:before,.thumbnails:after{display:table;line-height:0;content:""}.thumbnails:after{clear:both}.row-fluid .thumbnails{margin-left:0}.thumbnails>li{float:left;margin-bottom:20px;margin-left:20px}.thumbnail{display:block;padding:4px;line-height:20px;border:1px solid #ddd;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px;-webkit-box-shadow:0 1px 3px rgba(0,0,0,0.055);-moz-box-shadow:0 1px 3px rgba(0,0,0,0.055);box-shadow:0 1px 3px rgba(0,0,0,0.055);-webkit-transition:all .2s ease-in-out;-moz-transition:all .2s ease-in-out;-o-transition:all .2s ease-in-out;transition:all .2s ease-in-out}a.thumbnail:hover{border-color:#08c;-webkit-box-shadow:0 1px 4px rgba(0,105,214,0.25);-moz-box-shadow:0 1px 4px rgba(0,105,214,0.25);box-shadow:0 1px 4px rgba(0,105,214,0.25)}.thumbnail>img{display:block;max-width:100%;margin-right:auto;margin-left:auto}.thumbnail .caption{padding:9px;color:#555}.label,.badge{font-size:11.844px;font-weight:bold;line-height:14px;color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.25);white-space:nowrap;vertical-align:baseline;background-color:#999}.label{padding:1px 4px 2px;-webkit-border-radius:3px;-moz-border-radius:3px;border-radius:3px}.badge{padding:1px 9px 2px;-webkit-border-radius:9px;-moz-border-radius:9px;border-radius:9px}a.label:hover,a.badge:hover{color:#fff;text-decoration:none;cursor:pointer}.label-important,.badge-important{background-color:#b94a48}.label-important[href],.badge-important[href]{background-color:#953b39}.label-warning,.badge-warning{background-color:#f89406}.label-warning[href],.badge-warning[href]{background-color:#c67605}.label-success,.badge-success{background-color:#468847}.label-success[href],.badge-success[href]{background-color:#356635}.label-info,.badge-info{background-color:#3a87ad}.label-info[href],.badge-info[href]{background-color:#2d6987}.label-inverse,.badge-inverse{background-color:#333}.label-inverse[href],.badge-inverse[href]{background-color:#1a1a1a}.btn .label,.btn .badge{position:relative;top:-1px}.btn-mini .label,.btn-mini .badge{top:0}@-webkit-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@-moz-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@-ms-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@-o-keyframes progress-bar-stripes{from{background-position:0 0}to{background-position:40px 0}}@keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 
0}}.progress{height:20px;margin-bottom:20px;overflow:hidden;background-color:#f7f7f7;background-image:-moz-linear-gradient(top,#f5f5f5,#f9f9f9);background-image:-webkit-gradient(linear,0 0,0 100%,from(#f5f5f5),to(#f9f9f9));background-image:-webkit-linear-gradient(top,#f5f5f5,#f9f9f9);background-image:-o-linear-gradient(top,#f5f5f5,#f9f9f9);background-image:linear-gradient(to bottom,#f5f5f5,#f9f9f9);background-repeat:repeat-x;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px;filter:progid:dximagetransform.microsoft.gradient(startColorstr='#fff5f5f5',endColorstr='#fff9f9f9',GradientType=0);-webkit-box-shadow:inset 0 1px 2px rgba(0,0,0,0.1);-moz-box-shadow:inset 0 1px 2px rgba(0,0,0,0.1);box-shadow:inset 0 1px 2px rgba(0,0,0,0.1)}.progress .bar{float:left;width:0;height:100%;font-size:12px;color:#fff;text-align:center;text-shadow:0 -1px 0 rgba(0,0,0,0.25);background-color:#0e90d2;background-image:-moz-linear-gradient(top,#149bdf,#0480be);background-image:-webkit-gradient(linear,0 0,0 100%,from(#149bdf),to(#0480be));background-image:-webkit-linear-gradient(top,#149bdf,#0480be);background-image:-o-linear-gradient(top,#149bdf,#0480be);background-image:linear-gradient(to bottom,#149bdf,#0480be);background-repeat:repeat-x;filter:progid:dximagetransform.microsoft.gradient(startColorstr='#ff149bdf',endColorstr='#ff0480be',GradientType=0);-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,0.15);-moz-box-shadow:inset 0 -1px 0 rgba(0,0,0,0.15);box-shadow:inset 0 -1px 0 rgba(0,0,0,0.15);-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;-webkit-transition:width .6s ease;-moz-transition:width .6s ease;-o-transition:width .6s ease;transition:width .6s ease}.progress .bar+.bar{-webkit-box-shadow:inset 1px 0 0 rgba(0,0,0,0.15),inset 0 -1px 0 rgba(0,0,0,0.15);-moz-box-shadow:inset 1px 0 0 rgba(0,0,0,0.15),inset 0 -1px 0 rgba(0,0,0,0.15);box-shadow:inset 1px 0 0 rgba(0,0,0,0.15),inset 0 -1px 0 rgba(0,0,0,0.15)}.progress-striped .bar{background-color:#149bdf;background-image:-webkit-gradient(linear,0 100%,100% 0,color-stop(0.25,rgba(255,255,255,0.15)),color-stop(0.25,transparent),color-stop(0.5,transparent),color-stop(0.5,rgba(255,255,255,0.15)),color-stop(0.75,rgba(255,255,255,0.15)),color-stop(0.75,transparent),to(transparent));background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-moz-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);-webkit-background-size:40px 40px;-moz-background-size:40px 40px;-o-background-size:40px 40px;background-size:40px 40px}.progress.active .bar{-webkit-animation:progress-bar-stripes 2s linear infinite;-moz-animation:progress-bar-stripes 2s linear infinite;-ms-animation:progress-bar-stripes 2s linear infinite;-o-animation:progress-bar-stripes 2s linear infinite;animation:progress-bar-stripes 2s linear infinite}.progress-danger .bar,.progress 
.bar-danger{background-color:#dd514c;background-image:-moz-linear-gradient(top,#ee5f5b,#c43c35);background-image:-webkit-gradient(linear,0 0,0 100%,from(#ee5f5b),to(#c43c35));background-image:-webkit-linear-gradient(top,#ee5f5b,#c43c35);background-image:-o-linear-gradient(top,#ee5f5b,#c43c35);background-image:linear-gradient(to bottom,#ee5f5b,#c43c35);background-repeat:repeat-x;filter:progid:dximagetransform.microsoft.gradient(startColorstr='#ffee5f5b',endColorstr='#ffc43c35',GradientType=0)}.progress-danger.progress-striped .bar,.progress-striped .bar-danger{background-color:#ee5f5b;background-image:-webkit-gradient(linear,0 100%,100% 0,color-stop(0.25,rgba(255,255,255,0.15)),color-stop(0.25,transparent),color-stop(0.5,transparent),color-stop(0.5,rgba(255,255,255,0.15)),color-stop(0.75,rgba(255,255,255,0.15)),color-stop(0.75,transparent),to(transparent));background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-moz-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent)}.progress-success .bar,.progress .bar-success{background-color:#5eb95e;background-image:-moz-linear-gradient(top,#62c462,#57a957);background-image:-webkit-gradient(linear,0 0,0 100%,from(#62c462),to(#57a957));background-image:-webkit-linear-gradient(top,#62c462,#57a957);background-image:-o-linear-gradient(top,#62c462,#57a957);background-image:linear-gradient(to bottom,#62c462,#57a957);background-repeat:repeat-x;filter:progid:dximagetransform.microsoft.gradient(startColorstr='#ff62c462',endColorstr='#ff57a957',GradientType=0)}.progress-success.progress-striped .bar,.progress-striped .bar-success{background-color:#62c462;background-image:-webkit-gradient(linear,0 100%,100% 0,color-stop(0.25,rgba(255,255,255,0.15)),color-stop(0.25,transparent),color-stop(0.5,transparent),color-stop(0.5,rgba(255,255,255,0.15)),color-stop(0.75,rgba(255,255,255,0.15)),color-stop(0.75,transparent),to(transparent));background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-moz-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent)}.progress-info .bar,.progress .bar-info{background-color:#4bb1cf;background-image:-moz-linear-gradient(top,#5bc0de,#339bb9);background-image:-webkit-gradient(linear,0 0,0 
100%,from(#5bc0de),to(#339bb9));background-image:-webkit-linear-gradient(top,#5bc0de,#339bb9);background-image:-o-linear-gradient(top,#5bc0de,#339bb9);background-image:linear-gradient(to bottom,#5bc0de,#339bb9);background-repeat:repeat-x;filter:progid:dximagetransform.microsoft.gradient(startColorstr='#ff5bc0de',endColorstr='#ff339bb9',GradientType=0)}.progress-info.progress-striped .bar,.progress-striped .bar-info{background-color:#5bc0de;background-image:-webkit-gradient(linear,0 100%,100% 0,color-stop(0.25,rgba(255,255,255,0.15)),color-stop(0.25,transparent),color-stop(0.5,transparent),color-stop(0.5,rgba(255,255,255,0.15)),color-stop(0.75,rgba(255,255,255,0.15)),color-stop(0.75,transparent),to(transparent));background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-moz-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent)}.progress-warning .bar,.progress .bar-warning{background-color:#faa732;background-image:-moz-linear-gradient(top,#fbb450,#f89406);background-image:-webkit-gradient(linear,0 0,0 100%,from(#fbb450),to(#f89406));background-image:-webkit-linear-gradient(top,#fbb450,#f89406);background-image:-o-linear-gradient(top,#fbb450,#f89406);background-image:linear-gradient(to bottom,#fbb450,#f89406);background-repeat:repeat-x;filter:progid:dximagetransform.microsoft.gradient(startColorstr='#fffbb450',endColorstr='#fff89406',GradientType=0)}.progress-warning.progress-striped .bar,.progress-striped .bar-warning{background-color:#fbb450;background-image:-webkit-gradient(linear,0 100%,100% 0,color-stop(0.25,rgba(255,255,255,0.15)),color-stop(0.25,transparent),color-stop(0.5,transparent),color-stop(0.5,rgba(255,255,255,0.15)),color-stop(0.75,rgba(255,255,255,0.15)),color-stop(0.75,transparent),to(transparent));background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-moz-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent)}.accordion{margin-bottom:20px}.accordion-group{margin-bottom:2px;border:1px solid #e5e5e5;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}.accordion-heading{border-bottom:0}.accordion-heading .accordion-toggle{display:block;padding:8px 15px}.accordion-toggle{cursor:pointer}.accordion-inner{padding:9px 15px;border-top:1px solid 
#e5e5e5}.carousel{position:relative;margin-bottom:20px;line-height:1}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel .item{position:relative;display:none;-webkit-transition:.6s ease-in-out left;-moz-transition:.6s ease-in-out left;-o-transition:.6s ease-in-out left;transition:.6s ease-in-out left}.carousel .item>img{display:block;line-height:1}.carousel .active,.carousel .next,.carousel .prev{display:block}.carousel .active{left:0}.carousel .next,.carousel .prev{position:absolute;top:0;width:100%}.carousel .next{left:100%}.carousel .prev{left:-100%}.carousel .next.left,.carousel .prev.right{left:0}.carousel .active.left{left:-100%}.carousel .active.right{left:100%}.carousel-control{position:absolute;top:40%;left:15px;width:40px;height:40px;margin-top:-20px;font-size:60px;font-weight:100;line-height:30px;color:#fff;text-align:center;background:#222;border:3px solid #fff;-webkit-border-radius:23px;-moz-border-radius:23px;border-radius:23px;opacity:.5;filter:alpha(opacity=50)}.carousel-control.right{right:15px;left:auto}.carousel-control:hover{color:#fff;text-decoration:none;opacity:.9;filter:alpha(opacity=90)}.carousel-caption{position:absolute;right:0;bottom:0;left:0;padding:15px;background:#333;background:rgba(0,0,0,0.75)}.carousel-caption h4,.carousel-caption p{line-height:20px;color:#fff}.carousel-caption h4{margin:0 0 5px}.carousel-caption p{margin-bottom:0}.hero-unit{padding:60px;margin-bottom:30px;background-color:#eee;-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px}.hero-unit h1{margin-bottom:0;font-size:60px;line-height:1;letter-spacing:-1px;color:inherit}.hero-unit p{font-size:18px;font-weight:200;line-height:30px;color:inherit}.pull-right{float:right}.pull-left{float:left}.hide{display:none}.show{display:block}.invisible{visibility:hidden}.affix{position:fixed} \ No newline at end of file diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/css/colors.css b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/css/colors.css deleted file mode 100644 index 8774c02..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/css/colors.css +++ /dev/null @@ -1,302 +0,0 @@ -/*** - - colors.css v2.0.0 - http://clrs.cc - @mrmrs - MIT License - -***/ -/* - - SKINS - - Backgrounds - - Colors - - Border colors - - SVG fills - - SVG Strokes - -*/ -/* Backgrounds */ -.bg-navy { - background-color: #001F3F; } - -.bg-blue { - background-color: #0074D9; } - -.bg-aqua { - background-color: #7FDBFF; } - -.bg-teal { - background-color: #39CCCC; } - -.bg-olive { - background-color: #3D9970; } - -.bg-green { - background-color: #2ECC40; } - -.bg-lime { - background-color: #01FF70; } - -.bg-yellow { - background-color: #FFDC00; } - -.bg-orange { - background-color: #FF851B; } - -.bg-red { - background-color: #FF4136; } - -.bg-fuchsia { - background-color: #F012BE; } - -.bg-purple { - background-color: #B10DC9; } - -.bg-maroon { - background-color: #85144B; } - -.bg-white { - background-color: #fff; } - -.bg-gray { - background-color: #aaa; } - -.bg-silver { - background-color: #ddd; } - -.bg-black { - background-color: #111; } - -/* Colors */ -.navy { - color: #001F3F; } - -.blue { - color: #0074D9; } - -.aqua { - color: #7FDBFF; } - -.teal { - color: #39CCCC; } - -.olive { - color: #3D9970; } - -.green { - color: #2ECC40; } - -.lime { - color: #01FF70; } - -.yellow { - color: #FFDC00; } - -.orange { - color: #FF851B; } - -.red { - color: #FF4136; } - -.fuchsia { - color: #F012BE; } - -.purple { - color: #B10DC9; } - -.maroon { - color: #85144B; 
} - -.white { - color: #fff; } - -.silver { - color: #ddd; } - -.gray { - color: #aaa; } - -.black { - color: #111; } - -/* Border colors - - Use with another border utility that sets border-width and style - i.e .border { border-width: 1px; border-style: solid; } -*/ -.border--navy { - border-color: #001F3F; } - -.border--blue { - border-color: #0074D9; } - -.border--aqua { - border-color: #7FDBFF; } - -.border--teal { - border-color: #39CCCC; } - -.border--olive { - border-color: #3D9970; } - -.border--green { - border-color: #2ECC40; } - -.border--lime { - border-color: #01FF70; } - -.border--yellow { - border-color: #FFDC00; } - -.border--orange { - border-color: #FF851B; } - -.border--red { - border-color: #FF4136; } - -.border--fuchsia { - border-color: #F012BE; } - -.border--purple { - border-color: #B10DC9; } - -.border--maroon { - border-color: #85144B; } - -.border--white { - border-color: #fff; } - -.border--gray { - border-color: #aaa; } - -.border--silver { - border-color: #ddd; } - -.border--black { - border-color: #111; } - -/* Fills for SVG */ -.fill-navy { - fill: #001F3F; } - -.fill-blue { - fill: #0074D9; } - -.fill-aqua { - fill: #7FDBFF; } - -.fill-teal { - fill: #39CCCC; } - -.fill-olive { - fill: #3D9970; } - -.fill-green { - fill: #2ECC40; } - -.fill-lime { - fill: #01FF70; } - -.fill-yellow { - fill: #FFDC00; } - -.fill-orange { - fill: #FF851B; } - -.fill-red { - fill: #FF4136; } - -.fill-fuchsia { - fill: #F012BE; } - -.fill-purple { - fill: #B10DC9; } - -.fill-maroon { - fill: #85144B; } - -.fill-white { - fill: #fff; } - -.fill-gray { - fill: #aaa; } - -.fill-silver { - fill: #ddd; } - -.fill-black { - fill: #111; } - -/* Strokes for SVG */ -.stroke-navy { - stroke: #001F3F; } - -.stroke-blue { - stroke: #0074D9; } - -.stroke-aqua { - stroke: #7FDBFF; } - -.stroke-teal { - stroke: #39CCCC; } - -.stroke-olive { - stroke: #3D9970; } - -.stroke-green { - stroke: #2ECC40; } - -.stroke-lime { - stroke: #01FF70; } - -.stroke-yellow { - stroke: #FFDC00; } - -.stroke-orange { - stroke: #FF851B; } - -.stroke-red { - stroke: #FF4136; } - -.stroke-fuchsia { - stroke: #F012BE; } - -.stroke-purple { - stroke: #B10DC9; } - -.stroke-maroon { - stroke: #85144B; } - -.stroke-white { - stroke: #fff; } - -.stroke-gray { - stroke: #aaa; } - -.stroke-silver { - stroke: #ddd; } - -.stroke-black { - stroke: #111; } - -/* PRETTIER LINKS */ -a { - text-decoration: none; - -webkit-transition: color .3s ease-in-out; - transition: color .3s ease-in-out; } - -a:link { - -webkit-transition: color .3s ease-in-out; - transition: color .3s ease-in-out; } - -a:visited { } - -a:hover { - color: #001F3F; - -webkit-transition: color .3s ease-in-out; - transition: color .3s ease-in-out; } - -a:active { - -webkit-transition: color .3s ease-in-out; - transition: color .3s ease-in-out; } diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/css/style.css b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/css/style.css deleted file mode 100644 index ca7987f..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/css/style.css +++ /dev/null @@ -1,271 +0,0 @@ -/* main stylesheet */ - -@import url(http://fonts.googleapis.com/css?family=Signika); - -html { - overflow-y: scroll; -} - -body { - font-size: 15px; - font-family: HelveticaNeue, 'Helvetica Neue', Helvetica, Arial, sans-serif; - color: #332; -} - -h1, h2, h3, h4, h5 { - color: #332; - font-family: HelveticaNeue, 'Helvetica Neue', Helvetica, Arial, sans-serif; - font-weight: 400; - font-size: 1.4em; - line-height: 1.1; - 
margin-top: 30px; -} - -pre code { - font: 14px/19px Inconsolata, Monaco,"Lucida Console",Terminal,"Courier New",Courier; -} - -.figure { - text-align: center; -} - -.small .figure img { - height: 200px; -} - -.pagetitle .figure { - text-align: left !important; -} - -.pagetitle .figure img { - height: 36px; -} - -table{ - background:#fff; - border:1px solid #ccc; - border-width:2px; - border-collapse:collapse; - margin:5px 0 10px; - - margin-top: 20px; - margin-bottom: 20px; -} - -th, td{ - border:1px solid #ccc; - padding:3px 10px; - text-align:left; - vertical-align:top; -} - -tr.even td{ - background:#f7f7f7; -} - -th{ - background:#edeff0; -} - -td code { - border: 0px; -} - -img { - max-width: 100%; - height: auto; -} - -hr { - border: 0px; - height: 0; - border-bottom: 1px solid #ccc; - margin-bottom: 100px; -} - -/* Logo */ - -.logo { - text-align: center; -} - -.tagline { - font-family: Georgia; - font-size: 18px; - font-style: italic; - line-height: 1.45; - color: #383838; -} - -.author { -} - -.halfbreak { - padding-bottom: 100px; -} - -.break { - padding-bottom: 200px; -} - -/* TOC Links */ - -a { - color: #111111; - text-decoration: none; -} - -.body li a { - text-decoration: underline; -} - -/* Math */ - -.MathJax_Display { - padding-top: 20px; - padding-bottom: 20px; -} - -/* Body Links */ - -p a { - text-decoration: underline; -} - -li code, p code { - font-size: 12px; - border: 1px solid #ccc; - margin-left: 3px; - margin-right: 3px; - padding-left: 2px; - padding-right: 2px; -} - -/* */ - -.center { - text-align: center; -} - -.bigger img { - width: 120%; - height: 120%; -} - -pre { - font-size: 0.9em; - - margin-bottom: 18px; - margin-top: 18px; - - border-left: 1px solid #ccc; - -} - -h1 { - margin-top: 0px; -} - -.annotation { - font-size: 10pt; -} - -.annotation pre { - display: block; - margin: 0; - padding: 7px 10px; - overflow-x: auto; -} - -.annotation.span2 { - /* Override bootstrap */ - margin-left: 0px !important; - margin-top: 18px !important; -} - -.annotation pre code { - border: 0; - padding: 0; - background: transparent; -} - -blockquote { - border-left: 1px solid #ccc; - font-family: Georgia, serif; - font-size: 14px; - font-style: italic; - margin: 0.25em 0; - padding-left: 10px; - line-height: 1.45; - color: #383838; - left: 20px; -} - - -blockquote cite { - color: #999999; - font-size: 14px; - display: block; - margin-top: 5px; -} - -ul.sections { - list-style: none; - padding:0 0 5px 0; - margin:0; -} - -code.sourceCode { - padding: 0; - background: inherit; -} - -pre.sourceCode { - padding: 10px; -} - -ul.sections > li > div { - -moz-box-sizing: border-box; /* firefox */ - -ms-box-sizing: border-box; /* ie */ - -webkit-box-sizing: border-box; /* webkit */ - -khtml-box-sizing: border-box; /* konqueror */ - box-sizing: border-box; /* css3 */ -} - - -/* Make the naviation centered and larger on small screens */ -/*---------------------- (> 481px) ---------------------*/ - -@media only screen and (max-width: 481px) { - -} - -@media only screen and (min-width: 1025px) { - body { - padding: 10px; - } - - .side { - position: fixed; - width: 120px !important; - margin-left: 0px; - z-index: 1000; - } - - .side ul ul { - display: none; - } - - .side ul ul.active { - display: block; - } - - .side .active { - font-weight: bold; - } - - .body { - margin-left: 120px !important; - } - -} diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/allmaps.cc b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/allmaps.cc deleted file mode 100644 
index 89b1ac0..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/allmaps.cc +++ /dev/null @@ -1,75 +0,0 @@ -// Silly program just to test the natvis file for Visual Studio -// ------------------------------------------------------------ -#include -#include "parallel_hashmap/phmap.h" - -template -void test_set(const F &f) -{ - Set s; - typename Set::iterator it; - for (int i=0; i<100; ++i) - s.insert(f(i)); - - it = s.begin(); - ++it; - - it = s.end(); - it = s.begin(); - while(it != s.end()) - ++it; - it = s.begin(); -} - -int main(int, char **) -{ - using namespace std; - - auto make_int = [](int i) { return i; }; - auto make_string = [](int i) { return std::to_string(i); }; - - auto make_2int = [](int i) { return std::make_pair(i, i); }; - auto make_2string = [](int i) { return std::make_pair(std::to_string(i), std::to_string(i)); }; - - - test_set>(make_int); - test_set>(make_string); - - test_set>(make_int); - test_set>(make_string); - - test_set>(make_2int); - test_set>(make_2string); - - test_set>(make_2int); - test_set>(make_2string); - - test_set>(make_int); - test_set>(make_string); - - test_set>(make_int); - test_set>(make_string); - - test_set>(make_2int); - test_set>(make_2string); - - test_set>(make_2int); - test_set>(make_2string); - - // example of using default parameters in order to specify the mutex type. - // - // Please be aware that the iterators returned (by find for example) cannot - // be safely read in a multithreaded environment. Instead use if_contains(), - // which passes a reference value to the callback while holding the submap lock. - // Similarly, write access can be done safely using modify_if, try_emplace_l - // or lazy_emplace_l. - // ---------------------------------------------------------------------------- - using Map = phmap::parallel_flat_hash_map, - std::equal_to, - std::allocator>, - 4, - std::mutex>; - auto make_2size_t = [](size_t i) { return std::make_pair(i, i); }; - test_set(make_2size_t); -} diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/basic.cc b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/basic.cc deleted file mode 100644 index 2b7f050..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/basic.cc +++ /dev/null @@ -1,28 +0,0 @@ -#include -#include -#include - -using phmap::flat_hash_map; - -int main() -{ - // Create an unordered_map of three strings (that map to strings) - flat_hash_map email = - { - { "tom", "tom@gmail.com"}, - { "jeff", "jk@gmail.com"}, - { "jim", "jimg@microsoft.com"} - }; - - // Iterate and print keys and values - for (const auto& n : email) - std::cout << n.first << "'s email is: " << n.second << "\n"; - - // Add a new entry - email["bill"] = "bg@whatever.com"; - - // and print it - std::cout << "bill's email is: " << email["bill"] << "\n"; - - return 0; -} diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/bench.cc b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/bench.cc deleted file mode 100644 index 5fbdac3..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/bench.cc +++ /dev/null @@ -1,481 +0,0 @@ -#include - -#ifdef STL_UNORDERED - #include - #define MAPNAME std::unordered_map - #define EXTRAARGS -#elif defined(PHMAP_FLAT) - #include "parallel_hashmap/phmap.h" - #define MAPNAME phmap::flat_hash_map - #define NMSP phmap - #define EXTRAARGS -#else - #include "parallel_hashmap/phmap.h" - - #if 1 - #include - #define MTX std::mutex - #elif 0 - // Abseil's mutexes are very 
efficient (at least on windows) - #include "absl/synchronization/mutex.h" - #define MTX phmap::AbslMutex - #elif 1 - #include - #if 1 - #include - #define MTX boost::mutex // faster if all we do is exclusive locks like this bench - #else - #include - #define MTX boost::upgrade_mutex - #endif - #elif 1 - #include - class srwlock { - SRWLOCK _lock; - public: - srwlock() { InitializeSRWLock(&_lock); } - void lock() { AcquireSRWLockExclusive(&_lock); } - void unlock() { ReleaseSRWLockExclusive(&_lock); } - }; - #define MTX srwlock - #else - // spinlocks - slow! - #include - class spinlock { - std::atomic_flag flag = ATOMIC_FLAG_INIT; - public: - void lock() { while(flag.test_and_set(std::memory_order_acquire)); } - void unlock() { flag.clear(std::memory_order_release); } - }; - #define MTX spinlock - #endif - - #define MAPNAME phmap::parallel_flat_hash_map - #define NMSP phmap - - #define MT_SUPPORT 1 - #if MT_SUPPORT == 1 - // create the parallel_flat_hash_map without internal mutexes, for when - // we programatically ensure that each thread uses different internal submaps - // -------------------------------------------------------------------------- - #define EXTRAARGS , NMSP::priv::hash_default_hash, \ - NMSP::priv::hash_default_eq, \ - std::allocator>, 4, NMSP::NullMutex - #elif MT_SUPPORT == 2 - // create the parallel_flat_hash_map with internal mutexes, for when - // we read/write the same parallel_flat_hash_map from multiple threads, - // without any special precautions. - // -------------------------------------------------------------------------- - #define EXTRAARGS , NMSP::priv::hash_default_hash, \ - NMSP::priv::hash_default_eq, \ - std::allocator>, 4, MTX - #else - #define EXTRAARGS - #endif -#endif - -#define phmap_xstr(s) phmap_str(s) -#define phmap_str(s) #s - -template -using HashT = MAPNAME; - -using hash_t = HashT; -using str_hash_t = HashT; - -const char *program_slug = phmap_xstr(MAPNAME); // "_4"; - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "parallel_hashmap/meminfo.h" -#include -using std::vector; - -int64_t _abs(int64_t x) { return (x < 0) ? -x : x; } - -#ifdef _MSC_VER - #pragma warning(disable : 4996) -#endif // _MSC_VER - -// -------------------------------------------------------------------------- -class Timer -{ - typedef std::chrono::high_resolution_clock high_resolution_clock; - typedef std::chrono::milliseconds milliseconds; - -public: - explicit Timer(bool run = false) { if (run) reset(); } - void reset() { _start = high_resolution_clock::now(); } - - milliseconds elapsed() const - { - return std::chrono::duration_cast(high_resolution_clock::now() - _start); - } - -private: - high_resolution_clock::time_point _start; -}; - - -// -------------------------------------------------------------------------- -// from: https://github.com/preshing/RandomSequence -// -------------------------------------------------------------------------- -class RSU -{ -private: - unsigned int m_index; - unsigned int m_intermediateOffset; - - static unsigned int permuteQPR(unsigned int x) - { - static const unsigned int prime = 4294967291u; - if (x >= prime) - return x; // The 5 integers out of range are mapped to themselves. - unsigned int residue = ((unsigned long long) x * x) % prime; - return (x <= prime / 2) ? 
residue : prime - residue; - } - -public: - RSU(unsigned int seedBase, unsigned int seedOffset) - { - m_index = permuteQPR(permuteQPR(seedBase) + 0x682f0161); - m_intermediateOffset = permuteQPR(permuteQPR(seedOffset) + 0x46790905); - } - - unsigned int next() - { - return permuteQPR((permuteQPR(m_index++) + m_intermediateOffset) ^ 0x5bf03635); - } -}; - -// -------------------------------------------------------------------------- -char * new_string_from_integer(uint64_t num) -{ - int ndigits = num == 0 ? 1 : (int)log10(num) + 1; - char * str = (char *)malloc(ndigits + 1); - sprintf(str, "%u", (unsigned int)num); - return str; -} - -// -------------------------------------------------------------------------- -template -void _fill(vector &v) -{ - srand(1); // for a fair/deterministic comparison - for (size_t i = 0, sz = v.size(); i < sz; ++i) - v[i] = (T)(i * 10 + rand() % 10); -} - -// -------------------------------------------------------------------------- -template -void _shuffle(vector &v) -{ - for (size_t n = v.size(); n >= 2; --n) - std::swap(v[n - 1], v[static_cast(rand()) % n]); -} - -// -------------------------------------------------------------------------- -template -Timer _fill_random(vector &v, HT &hash) -{ - _fill(v); - _shuffle(v); - - Timer timer(true); - - for (size_t i = 0, sz = v.size(); i < sz; ++i) - hash.insert(typename HT::value_type(v[i], 0)); - return timer; -} - -// -------------------------------------------------------------------------- -void out(const char* test, int64_t cnt, const Timer &t, bool = false) -{ - printf("%s,time,%u,%s,%f\n", test, (unsigned int)cnt, program_slug, - (float)((double)t.elapsed().count() / 1000)); -} - -// -------------------------------------------------------------------------- -void outmem(const char*, int64_t cnt, uint64_t mem, bool final = false) -{ - static uint64_t max_mem = 0; - static uint64_t max_keys = 0; - if (final) - printf("peak memory usage for %u values: %.2f GB\n", (unsigned int)max_keys, - max_mem / ((double)1000 * 1000 * 1000)); - else { - if (mem > max_mem) - max_mem = mem; - if ((uint64_t)cnt > max_keys) - max_keys = cnt; - } -} - -static bool all_done = false; -static int64_t s_num_keys[16] = { 0 }; -static int64_t loop_idx = 0; -static int64_t inner_cnt = 0; -static const char *test = "random"; - -// -------------------------------------------------------------------------- -template -void _fill_random_inner(int64_t cnt, HT &hash, RSU &rsu) -{ - for (int64_t i=0; i -void _fill_random_inner_mt(int64_t cnt, HT &hash, RSU &rsu) -{ - constexpr int64_t num_threads = 8; // has to be a power of two - std::unique_ptr threads[num_threads]; - - auto thread_fn = [&hash, cnt, num_threads](size_t thread_idx, RSU rsu_) { -#if MT_SUPPORT - size_t modulo = hash.subcnt() / num_threads; // subcnt() returns the number of submaps - - for (int64_t i=0; ijoin(); -} - -// -------------------------------------------------------------------------- -size_t total_num_keys() -{ - size_t n = 0; - for (int i=0; i<16; ++i) - n += s_num_keys[i]; - return n; -} - -// -------------------------------------------------------------------------- -template -Timer _fill_random2(int64_t cnt, HT &hash) -{ - test = "random"; - unsigned int seed = 76687; - RSU rsu(seed, seed + 1); - - Timer timer(true); - const int64_t num_loops = 10; - inner_cnt = cnt / num_loops; - - for (int i=0; i<16; ++i) - s_num_keys[i] = 0; - - for (loop_idx=0; loop_idx -Timer _lookup(vector &v, HT &hash, size_t &num_present) -{ - _fill_random(v, hash); - - num_present = 0; 
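// [editor's note] Annotation of the _lookup benchmark below, based only on the
// surrounding code: _fill() stores one key per decade (v[i] = i*10 + rand()%10)
// and every v[i] is inserted, so in the loop that follows hash.find(v[i])
// always hits, while hash.find((T)(rand() % max_val)) probes the range
// [0, 10*N) in which only ~N keys exist and therefore hits only about 10% of
// the time. The reported time is thus a mix of guaranteed hits and ~90%-miss
// probes, not a pure-hit or pure-miss measurement.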
- size_t max_val = v.size() * 10; - Timer timer(true); - - for (size_t i = 0, sz = v.size(); i < sz; ++i) - { - num_present += (size_t)(hash.find(v[i]) != hash.end()); - num_present += (size_t)(hash.find((T)(rand() % max_val)) != hash.end()); - } - return timer; -} - -// -------------------------------------------------------------------------- -template -Timer _delete(vector &v, HT &hash) -{ - _fill_random(v, hash); - _shuffle(v); // don't delete in insertion order - - Timer timer(true); - - for(size_t i = 0, sz = v.size(); i < sz; ++i) - hash.erase(v[i]); - return timer; -} - -// -------------------------------------------------------------------------- -void memlog() -{ - std::this_thread::sleep_for(std::chrono::milliseconds(10)); - uint64_t nbytes_old_out = spp::GetProcessMemoryUsed(); - uint64_t nbytes_old = spp::GetProcessMemoryUsed(); // last non outputted mem measurement - outmem(test, 0, nbytes_old); - int64_t last_loop = 0; - - while (!all_done) - { - uint64_t nbytes = spp::GetProcessMemoryUsed(); - - if ((double)_abs(nbytes - nbytes_old_out) / nbytes_old_out > 0.03 || - (double)_abs(nbytes - nbytes_old) / nbytes_old > 0.01) - { - if ((double)(nbytes - nbytes_old) / nbytes_old > 0.03) - outmem(test, total_num_keys() - 1, nbytes_old); - outmem(test, total_num_keys(), nbytes); - nbytes_old_out = nbytes; - last_loop = loop_idx; - } - else if (loop_idx > last_loop) - { - outmem(test, total_num_keys(), nbytes); - nbytes_old_out = nbytes; - last_loop = loop_idx; - } - nbytes_old = nbytes; - - std::this_thread::sleep_for(std::chrono::milliseconds(1)); - } -} - - -// -------------------------------------------------------------------------- -int main(int argc, char ** argv) -{ - int64_t num_keys = 100000000; - const char *bench_name = "random"; - int64_t i, value = 0; - - if(argc > 2) - { - num_keys = atoi(argv[1]); - bench_name = argv[2]; - } - - hash_t hash; - str_hash_t str_hash; - - srand(1); // for a fair/deterministic comparison - Timer timer(true); - -#if MT_SUPPORT - if (!strcmp(program_slug,"absl::parallel_flat_hash_map") || - !strcmp(program_slug,"phmap::parallel_flat_hash_map")) - program_slug = phmap_xstr(MAPNAME) "_mt"; -#endif - - std::thread t1(memlog); - - try - { - if(!strcmp(bench_name, "sequential")) - { - for(i = 0; i < num_keys; i++) - hash.insert(hash_t::value_type(i, value)); - } -#if 0 - else if(!strcmp(bench_name, "random")) - { - vector v(num_keys); - timer = _fill_random(v, hash); - out("random", num_keys, timer); - } -#endif - else if(!strcmp(bench_name, "random")) - { - fprintf(stderr, "size = %zu\n", sizeof(hash)); - timer = _fill_random2(num_keys, hash); - } - else if(!strcmp(bench_name, "lookup")) - { - vector v(num_keys); - size_t num_present; - - timer = _lookup(v, hash, num_present); - //fprintf(stderr, "found %zu\n", num_present); - } - else if(!strcmp(bench_name, "delete")) - { - vector v(num_keys); - timer = _delete(v, hash); - } - else if(!strcmp(bench_name, "sequentialstring")) - { - for(i = 0; i < num_keys; i++) - str_hash.insert(str_hash_t::value_type(new_string_from_integer(i), value)); - } - else if(!strcmp(bench_name, "randomstring")) - { - for(i = 0; i < num_keys; i++) - str_hash.insert(str_hash_t::value_type(new_string_from_integer((int)rand()), value)); - } - else if(!strcmp(bench_name, "deletestring")) - { - for(i = 0; i < num_keys; i++) - str_hash.insert(str_hash_t::value_type(new_string_from_integer(i), value)); - timer.reset(); - for(i = 0; i < num_keys; i++) - str_hash.erase(new_string_from_integer(i)); - } - - - //printf("%f\n", 
(float)((double)timer.elapsed().count() / 1000)); - fflush(stdout); - //std::this_thread::sleep_for(std::chrono::seconds(1000)); - } - catch (...) - { - } - - all_done = true; - outmem(test, 0, 0, true); - t1.join(); - return 0; -} diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/btree.cc b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/btree.cc deleted file mode 100644 index 4ff8e23..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/btree.cc +++ /dev/null @@ -1,49 +0,0 @@ -#include -#include "btree_fwd.h" -#include - -int main() -{ - // initialise map with some values using an initializer_list - phmap::btree_map map = - { { "John", 35 }, - { "Jane", 32 }, - { "Joe", 30 }, - }; - - // add a couple more values using operator[]() - map["lucy"] = 18; - map["Andre"] = 20; - - auto it = map.find("Joe"); - map.erase(it); - - map.insert(std::make_pair("Alex", 16)); - map.emplace("Emily", 18); // emplace uses pair template constructor - - for (auto& p: map) - std::cout << p.first << ", " << p.second << '\n'; - - IntString map2; // IntString is declared in btree_fwd.h - - map2.emplace(std::piecewise_construct, std::forward_as_tuple(0), std::forward_as_tuple(10, 'c')); - map2.try_emplace(1, 10, 'a'); // phmap::btree_map supports c++17 API - - for (auto& p: map2) - std::cout << p.first << ", " << p.second << '\n'; - - // create a btree_set of tuples - using X = std::tuple; - phmap::btree_set set; - - for (int i=0; i<10; ++i) - set.insert(X((float)i, std::to_string(i))); - set.emplace(15.0f, "15"); - - set.erase(X(1.0f, "1")); - - for (auto& e: set) - std::cout << std::get<0>(e) << ", \"" << std::get<1>(e) << "\" \n"; - - return 0; -} diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/btree_fwd.h b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/btree_fwd.h deleted file mode 100644 index bd8c5e7..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/btree_fwd.h +++ /dev/null @@ -1,6 +0,0 @@ -#include - -#include - -using IntString = phmap::btree_map; - diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/dump_load.cc b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/dump_load.cc deleted file mode 100644 index 6b003e4..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/dump_load.cc +++ /dev/null @@ -1,53 +0,0 @@ -#include -#include - -void dump_load_uint64_uint32() { - phmap::flat_hash_map mp1 = { {100, 99}, {300, 299} }; - - for (const auto& n : mp1) - std::cout << n.first << "'s value is: " << n.second << "\n"; - - { - phmap::BinaryOutputArchive ar_out("./dump.data"); - mp1.phmap_dump(ar_out); - } - - phmap::flat_hash_map mp2; - { - phmap::BinaryInputArchive ar_in("./dump.data"); - mp2.phmap_load(ar_in); - } - - for (const auto& n : mp2) - std::cout << n.first << "'s value is: " << n.second << "\n"; -} - -void dump_load_parallel_flat_hash_map() { - phmap::parallel_flat_hash_map mp1 = { - {100, 99}, {300, 299}, {101, 992} }; - - for (const auto& n : mp1) - std::cout << "key: " << n.first << ", value: " << n.second << "\n"; - - { - phmap::BinaryOutputArchive ar_out("./dump.data"); - mp1.phmap_dump(ar_out); - } - - phmap::parallel_flat_hash_map mp2; - { - phmap::BinaryInputArchive ar_in("./dump.data"); - mp2.phmap_load(ar_in); - } - - for (const auto& n : mp2) - std::cout << "key: " << n.first << ", value: " << n.second << "\n"; -} - -int main() -{ - dump_load_uint64_uint32(); - dump_load_parallel_flat_hash_map(); - return 0; -} - diff 
--git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/dump_nested.cc b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/dump_nested.cc deleted file mode 100644 index c207ba3..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/dump_nested.cc +++ /dev/null @@ -1,88 +0,0 @@ -/* - * - * Example of dumping a map, containing values which are phmap maps or sets - * building this requires c++17 support - * - */ - -#include -#include - -template -class MyMap : public phmap::flat_hash_map> -{ -public: - using Set = phmap::flat_hash_set; - - void dump(const std::string &filename) - { - phmap::BinaryOutputArchive ar_out (filename.c_str()); - - ar_out.dump(this->size()); - for (auto& [k, v] : *this) - { - ar_out.dump(k); - v.dump(ar_out); - } - } - - void load(const std::string & filename) - { - phmap::BinaryInputArchive ar_in(filename.c_str()); - - size_t size; - ar_in.load(&size); - this->reserve(size); - - while (size--) - { - K k; - Set v; - - ar_in.load(&k); - v.load(ar_in); - - this->insert_or_assign(std::move(k), std::move(v)); - } - } - - void insert(K k, V v) - { - Set &set = (*this)[k]; - set.insert(v); - } - - friend std::ostream& operator<<(std::ostream& os, const MyMap& map) - { - for (const auto& [k, m] : map) - { - os << k << ": ["; - for (const auto& x : m) - os << x << ", "; - os << "]\n"; - } - return os; - } -}; - -int main() -{ - MyMap m; - m.insert(1, 5); - m.insert(1, 8); - m.insert(2, 3); - m.insert(1, 15); - m.insert(1, 27); - m.insert(2, 10); - m.insert(2, 13); - - std::cout << m << "\n"; - - m.dump("test_archive"); - m.clear(); - m.load("test_archive"); - - std::cout << m << "\n"; - - return 0; -} diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/emplace.cc b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/emplace.cc deleted file mode 100644 index 540318c..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/emplace.cc +++ /dev/null @@ -1,166 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include - -#include - -template -using milliseconds = std::chrono::duration; - -// type containing std::string. Seems to take a long time to construct (and maybe move) -// ------------------------------------------------------------------------------------ -class custom_type -{ - std::string one = "one"; - std::string two = "two"; - std::uint32_t three = 3; - std::uint64_t four = 4; - std::uint64_t five = 5; -public: - custom_type() = default; - - // Make object movable and non-copyable - custom_type(custom_type &&) = default; - custom_type& operator=(custom_type &&) = default; - - // should be automatically deleted per http://www.slideshare.net/ripplelabs/howard-hinnant-accu2014 - //custom_type(custom_type const&) = delete; - //custom_type& operator=(custom_type const&) = delete; -}; - -// type containing only integrals. should be faster to create. 
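// [editor's note] Why custom_type_2 below should be cheaper than custom_type
// above: custom_type default-constructs two std::string members, so every
// value insertion pays for string construction (and its defaulted move must
// transfer string state), whereas custom_type_2 holds only integers and is
// trivially cheap to create and move. A hypothetical minimal illustration
// (names are the editor's, not part of this file):
//
//   struct heavy { std::string a{"one"}, b{"two"}; };  // string ctors run
//   struct light { uint64_t x = 3, y = 4; };           // plain stores only
//
// Inserting many values of 'heavy' into any container is dominated by
// constructing the strings; with 'light' the container's own bookkeeping
// dominates instead.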
-// ----------------------------------------------------------- -class custom_type_2 -{ - std::uint32_t three = 3; - std::uint64_t four = 4; - std::uint64_t five = 5; - std::uint64_t six = 6; -public: - custom_type_2() = default; - - // Make object movable and non-copyable - custom_type_2(custom_type_2 &&) = default; - custom_type_2& operator=(custom_type_2 &&) = default; - - // should be automatically deleted per http://www.slideshare.net/ripplelabs/howard-hinnant-accu2014 - //custom_type_2(custom_type_2 const&) = delete; - //custom_type_2& operator=(custom_type_2 const&) = delete; -}; - -// convert std::size_t to appropriate key -// -------------------------------------- -template -struct GenKey -{ - K operator()(std::size_t j); -}; - -template <> -struct GenKey -{ - std::string operator()(std::size_t j) { - std::ostringstream stm; - stm << j; - return stm.str(); - } -}; - -template <> -struct GenKey -{ - int operator()(std::size_t j) { - return (int)j; - } -}; - -// emplace key + large struct -// -------------------------- -template struct _emplace -{ - void operator()(Map &m, std::size_t j); -}; - -// "void" template parameter -> use emplace -template struct _emplace -{ - void operator()(Map &m, std::size_t j) - { - m.emplace(GenKey()(j), V()); - } -}; - -// "int" template parameter -> use emplace_back for std::vector -template struct _emplace -{ - void operator()(Map &m, std::size_t j) - { - m.emplace_back(GenKey()(j), V()); - } -}; - -// The test itself -// --------------- -template class INSERT> -void _test(std::size_t iterations, std::size_t container_size, const char *map_name) -{ - std::size_t count = 0; - auto t1 = std::chrono::high_resolution_clock::now(); - INSERT insert; - for (std::size_t i=0; i(t2 - t1).count(); - if (count != iterations*container_size) - std::clog << " invalid count: " << count << "\n"; - std::clog << map_name << std::fixed << int(elapsed) << " ms\n"; -} - - -template class INSERT> -void test(std::size_t iterations, std::size_t container_size) -{ - std::clog << "bench: iterations: " << iterations << " / container_size: " << container_size << "\n"; - - _test, K, V, void, INSERT>(iterations, container_size, " std::map: "); - _test, K, V, void, INSERT>(iterations, container_size, " std::unordered_map: "); - _test, K, V, void, INSERT>(iterations, container_size, " phmap::flat_hash_map: "); - _test>, K, V, int, INSERT> (iterations, container_size, " std::vector: "); - std::clog << "\n"; - -} - -int main() -{ - std::size_t iterations = 100000; - - // test with custom_type_2 (int key + 32 byte value). This is representative - // of the hash table insertion speed. - // ------------------------------------------------------------------------- - std::clog << "\n\n" << "testing with " "\n"; - std::clog << "---------------------------------" "\n"; - test(iterations,10); - test(iterations,100); - test(iterations,500); - - // test with custom_type, which contains two std::string values, and use - // a generated string key. This is not very indicative of the speed of the - // hash itself, as a good chunk of the time is spent creating the keys and - // values (as shown by the long times even for std::vector). 
- // ----------------------------------------------------------------------- - std::clog << "\n" << "testing with " "\n"; - std::clog << "---------------------------------" "\n"; - test(iterations,1); - test(iterations,10); - test(iterations,50); - -} diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/f1.cc b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/f1.cc deleted file mode 100644 index 9cec278..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/f1.cc +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Make sure that the phmap.h header builds fine when included in two separate - * source files - */ -#include -#include - -using phmap::flat_hash_map; - -int main() -{ - // Create an unordered_map of three strings (that map to strings) - using Map = flat_hash_map; - Map email = - { - { "tom", "tom@gmail.com"}, - { "jeff", "jk@gmail.com"}, - { "jim", "jimg@microsoft.com"} - }; - - extern void f2(Map&); - f2(email); - - return 0; -} diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/f2.cc b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/f2.cc deleted file mode 100644 index eca567e..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/f2.cc +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Make sure that the phmap.h header builds fine when included in two separate - * source files - */ -#include -#include -#include - -using phmap::flat_hash_map; -using Map = flat_hash_map; - -void f2(Map& email) -{ - // Iterate and print keys and values - for (const auto& n : email) - std::cout << n.first << "'s email is: " << n.second << "\n"; - - // Add a new entry - email["bill"] = "bg@whatever.com"; - - // and print it - std::cout << "bill's email is: " << email["bill"] << "\n"; -} diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/hash_std.cc b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/hash_std.cc deleted file mode 100644 index 04551ff..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/hash_std.cc +++ /dev/null @@ -1,20 +0,0 @@ -#include "hash_std.h" // defines Person with std::hash specialization - -#include -#include - -int main() -{ - // As we have defined a specialization of std::hash() for Person, - // we can now create sparse_hash_set or sparse_hash_map of Persons - // ---------------------------------------------------------------- - phmap::flat_hash_set persons = - { { "John", "Mitchell", 35 }, - { "Jane", "Smith", 32 }, - { "Jane", "Smith", 30 }, - }; - - for (auto& p: persons) - std::cout << p._first << ' ' << p._last << " (" << p._age << ")" << '\n'; - -} diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/hash_std.h b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/hash_std.h deleted file mode 100644 index debf4a9..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/hash_std.h +++ /dev/null @@ -1,34 +0,0 @@ -#ifndef phmap_example_hash_std_ -#define phmap_example_hash_std_ - -#include // minimal header providing phmap::HashState() -#include -using std::string; - -struct Person -{ - bool operator==(const Person &o) const - { - return _first == o._first && _last == o._last && _age == o._age; - } - - string _first; - string _last; - int _age; -}; - -namespace std -{ - // inject specialization of std::hash for Person into namespace std - // An alternative is to provide a hash_value() friend function (see hash_value.h) - // 
------------------------------------------------------------------------------ - template<> struct hash - { - std::size_t operator()(Person const &p) const - { - return phmap::HashState().combine(0, p._first, p._last, p._age); - } - }; -} - -#endif // phmap_example_hash_std_ diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/hash_value.cc b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/hash_value.cc deleted file mode 100644 index 43d2b05..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/hash_value.cc +++ /dev/null @@ -1,20 +0,0 @@ -#include "hash_value.h" // defines Person with std::hash specialization - -#include -#include - -int main() -{ - // As we have defined a specialization of std::hash() for Person, - // we can now create sparse_hash_set or sparse_hash_map of Persons - // ---------------------------------------------------------------- - phmap::flat_hash_set persons = - { { "John", "Mitchell", 35 }, - { "Jane", "Smith", 32 }, - { "Jane", "Smith", 30 }, - }; - - for (auto& p: persons) - std::cout << p._first << ' ' << p._last << " (" << p._age << ")" << '\n'; - -} diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/hash_value.h b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/hash_value.h deleted file mode 100644 index ffcc3e0..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/hash_value.h +++ /dev/null @@ -1,28 +0,0 @@ -#ifndef phmap_example_hash_value_ -#define phmap_example_hash_value_ - -#include // minimal header providing phmap::HashState() -#include -using std::string; - -struct Person -{ - bool operator==(const Person &o) const - { - return _first == o._first && _last == o._last && _age == o._age; - } - - // Demonstrates how to provide the hash function as a friend member function of the class - // This can be used as an alternative to providing a std::hash specialization - // -------------------------------------------------------------------------------------- - friend size_t hash_value(const Person &p) - { - return phmap::HashState().combine(0, p._first, p._last, p._age); - } - - string _first; - string _last; - int _age; -}; - -#endif // phmap_example_hash_value_ diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/insert_bench.cc b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/insert_bench.cc deleted file mode 100644 index b01b982..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/insert_bench.cc +++ /dev/null @@ -1,143 +0,0 @@ -#include -#include -#include -#include -#include -#include -#define PHMAP_ALLOCATOR_NOTHROW 1 -#include - -// this is probably the fastest high quality 64bit random number generator that exists. -// Implements Small Fast Counting v4 RNG from PractRand. 
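// [editor's note] For readers skimming the class below: operator()() is the
// SFC64 update step as implemented here. With 64-bit state (a, b, c) and a
// counter,
//
//     tmp = a + b + counter++;
//     a   = b ^ (b >> 11);      // right_shift
//     b   = c + (c << 3);       // left_shift
//     c   = rotl(c, 24) + tmp;  // rotation
//     return tmp;
//
// which matches the right_shift/left_shift/rotation constants declared at the
// bottom of the class; the seeding constructor then discards the first 12
// outputs to mix the state before use.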
-class sfc64 { -public: - using result_type = uint64_t; - - // no copy ctors so we don't accidentally get the same random again - sfc64(sfc64 const&) = delete; - sfc64& operator=(sfc64 const&) = delete; - - sfc64(sfc64&&) = default; - sfc64& operator=(sfc64&&) = default; - - sfc64(std::array const& _state) - : m_a(_state[0]) - , m_b(_state[1]) - , m_c(_state[2]) - , m_counter(_state[3]) {} - - static constexpr uint64_t(min)() { - return (std::numeric_limits::min)(); - } - static constexpr uint64_t(max)() { - return (std::numeric_limits::max)(); - } - - sfc64() - : sfc64(UINT64_C(0x853c49e6748fea9b)) {} - - sfc64(uint64_t _seed) - : m_a(_seed) - , m_b(_seed) - , m_c(_seed) - , m_counter(1) { - for (int i = 0; i < 12; ++i) { - operator()(); - } - } - - void seed() { - *this = sfc64{std::random_device{}()}; - } - - uint64_t operator()() noexcept { - auto const tmp = m_a + m_b + m_counter++; - m_a = m_b ^ (m_b >> right_shift); - m_b = m_c + (m_c << left_shift); - m_c = rotl(m_c, rotation) + tmp; - return tmp; - } - - // this is a bit biased, but for our use case that's not important. - uint64_t operator()(uint64_t boundExcluded) noexcept { -#ifdef PHMAP_HAS_UMUL128 - uint64_t h; - (void)umul128(operator()(), boundExcluded, &h); - return h; -#else - return 0; -#endif - } - - std::array state() const { - return {{m_a, m_b, m_c, m_counter}}; - } - - void state(std::array const& s) { - m_a = s[0]; - m_b = s[1]; - m_c = s[2]; - m_counter = s[3]; - } - -private: - template - T rotl(T const x, int k) { - return (x << k) | (x >> (8 * sizeof(T) - k)); - } - - static constexpr int rotation = 24; - static constexpr int right_shift = 11; - static constexpr int left_shift = 3; - uint64_t m_a; - uint64_t m_b; - uint64_t m_c; - uint64_t m_counter; -}; - - -int main() -{ - // Create an unordered_map of three strings (that map to strings) - using Map = phmap::parallel_node_hash_map; - static size_t const n = 50000000; - sfc64 rng(123); - - size_t checksum = 0; - - if (0) - { - size_t const max_rng = n / 20; - Map map; - for (size_t i = 0; i < n; ++i) { - checksum += ++map[static_cast(rng(max_rng))]; - } - } - - if (0) - { - size_t const max_rng = n / 4; - Map map; - for (size_t i = 0; i < n; ++i) { - checksum += ++map[static_cast(rng(max_rng))]; - } - } - - if (1) - { - size_t const max_rng = n / 2; - Map map; - for (size_t i = 0; i < n; ++i) { - checksum += ++map[static_cast(rng(max_rng))]; - } - } - - if (0) - { - Map map; - for (size_t i = 0; i < n; ++i) { - checksum += ++map[static_cast(rng())]; - } - } - printf("%zu\n", checksum); -} diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/knucleotide-input.txt b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/knucleotide-input.txt deleted file mode 100644 index f1caba0..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/knucleotide-input.txt +++ /dev/null @@ -1,171 +0,0 @@ ->ONE Homo sapiens alu -GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGA -TCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACT -AAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAG -GCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCG -CCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGT -GGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCA -GGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAA -TTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAG -AATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCA -GCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGT 
-AATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACC -AGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTG -GTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACC -CGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAG -AGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTT -TGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACA -TGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCT -GTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGG -TTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGT -CTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGG -CGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCG -TCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTA -CTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCG -AGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCG -GGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACC -TGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAA -TACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGA -GGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACT -GCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTC -ACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGT -TCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGC -CGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCG -CTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTG -GGCGACAGAGCGAGACTCCG ->TWO IUB ambiguity codes -cttBtatcatatgctaKggNcataaaSatgtaaaDcDRtBggDtctttataattcBgtcg -tactDtDagcctatttSVHtHttKtgtHMaSattgWaHKHttttagacatWatgtRgaaa -NtactMcSMtYtcMgRtacttctWBacgaaatatagScDtttgaagacacatagtVgYgt -cattHWtMMWcStgttaggKtSgaYaaccWStcgBttgcgaMttBYatcWtgacaYcaga -gtaBDtRacttttcWatMttDBcatWtatcttactaBgaYtcttgttttttttYaaScYa -HgtgttNtSatcMtcVaaaStccRcctDaataataStcYtRDSaMtDttgttSagtRRca -tttHatSttMtWgtcgtatSSagactYaaattcaMtWatttaSgYttaRgKaRtccactt -tattRggaMcDaWaWagttttgacatgttctacaaaRaatataataaMttcgDacgaSSt -acaStYRctVaNMtMgtaggcKatcttttattaaaaagVWaHKYagtttttatttaacct -tacgtVtcVaattVMBcttaMtttaStgacttagattWWacVtgWYagWVRctDattBYt -gtttaagaagattattgacVatMaacattVctgtBSgaVtgWWggaKHaatKWcBScSWa -accRVacacaaactaccScattRatatKVtactatatttHttaagtttSKtRtacaaagt -RDttcaaaaWgcacatWaDgtDKacgaacaattacaRNWaatHtttStgttattaaMtgt -tgDcgtMgcatBtgcttcgcgaDWgagctgcgaggggVtaaScNatttacttaatgacag -cccccacatYScaMgtaggtYaNgttctgaMaacNaMRaacaaacaKctacatagYWctg -ttWaaataaaataRattagHacacaagcgKatacBttRttaagtatttccgatctHSaat -actcNttMaagtattMtgRtgaMgcataatHcMtaBSaRattagttgatHtMttaaKagg -YtaaBataSaVatactWtataVWgKgttaaaacagtgcgRatatacatVtHRtVYataSa -KtWaStVcNKHKttactatccctcatgWHatWaRcttactaggatctataDtDHBttata -aaaHgtacVtagaYttYaKcctattcttcttaataNDaaggaaaDYgcggctaaWSctBa -aNtgctggMBaKctaMVKagBaactaWaDaMaccYVtNtaHtVWtKgRtcaaNtYaNacg -gtttNattgVtttctgtBaWgtaattcaagtcaVWtactNggattctttaYtaaagccgc -tcttagHVggaYtgtNcDaVagctctctKgacgtatagYcctRYHDtgBattDaaDgccK -tcHaaStttMcctagtattgcRgWBaVatHaaaataYtgtttagMDMRtaataaggatMt -ttctWgtNtgtgaaaaMaatatRtttMtDgHHtgtcattttcWattRSHcVagaagtacg -ggtaKVattKYagactNaatgtttgKMMgYNtcccgSKttctaStatatNVataYHgtNa -BKRgNacaactgatttcctttaNcgatttctctataScaHtataRagtcRVttacDSDtt -aRtSatacHgtSKacYagttMHtWataggatgactNtatSaNctataVtttRNKtgRacc -tttYtatgttactttttcctttaaacatacaHactMacacggtWataMtBVacRaSaatc -cgtaBVttccagccBcttaRKtgtgcctttttRtgtcagcRttKtaaacKtaaatctcac -aattgcaNtSBaaccgggttattaaBcKatDagttactcttcattVtttHaaggctKKga -tacatcBggScagtVcacattttgaHaDSgHatRMaHWggtatatRgccDttcgtatcga -aacaHtaagttaRatgaVacttagattVKtaaYttaaatcaNatccRttRRaMScNaaaD 
-gttVHWgtcHaaHgacVaWtgttScactaagSgttatcttagggDtaccagWattWtRtg -ttHWHacgattBtgVcaYatcggttgagKcWtKKcaVtgaYgWctgYggVctgtHgaNcV -taBtWaaYatcDRaaRtSctgaHaYRttagatMatgcatttNattaDttaattgttctaa -ccctcccctagaWBtttHtBccttagaVaatMcBHagaVcWcagBVttcBtaYMccagat -gaaaaHctctaacgttagNWRtcggattNatcRaNHttcagtKttttgWatWttcSaNgg -gaWtactKKMaacatKatacNattgctWtatctaVgagctatgtRaHtYcWcttagccaa -tYttWttaWSSttaHcaaaaagVacVgtaVaRMgattaVcDactttcHHggHRtgNcctt -tYatcatKgctcctctatVcaaaaKaaaagtatatctgMtWtaaaacaStttMtcgactt -taSatcgDataaactaaacaagtaaVctaggaSccaatMVtaaSKNVattttgHccatca -cBVctgcaVatVttRtactgtVcaattHgtaaattaaattttYtatattaaRSgYtgBag -aHSBDgtagcacRHtYcBgtcacttacactaYcgctWtattgSHtSatcataaatataHt -cgtYaaMNgBaatttaRgaMaatatttBtttaaaHHKaatctgatWatYaacttMctctt -ttVctagctDaaagtaVaKaKRtaacBgtatccaaccactHHaagaagaaggaNaaatBW -attccgStaMSaMatBttgcatgRSacgttVVtaaDMtcSgVatWcaSatcttttVatag -ttactttacgatcaccNtaDVgSRcgVcgtgaacgaNtaNatatagtHtMgtHcMtagaa -attBgtataRaaaacaYKgtRccYtatgaagtaataKgtaaMttgaaRVatgcagaKStc -tHNaaatctBBtcttaYaBWHgtVtgacagcaRcataWctcaBcYacYgatDgtDHccta ->THREE Homo sapiens frequency -aacacttcaccaggtatcgtgaaggctcaagattacccagagaacctttgcaatataaga -atatgtatgcagcattaccctaagtaattatattctttttctgactcaaagtgacaagcc -ctagtgtatattaaatcggtatatttgggaaattcctcaaactatcctaatcaggtagcc -atgaaagtgatcaaaaaagttcgtacttataccatacatgaattctggccaagtaaaaaa -tagattgcgcaaaattcgtaccttaagtctctcgccaagatattaggatcctattactca -tatcgtgtttttctttattgccgccatccccggagtatctcacccatccttctcttaaag -gcctaatattacctatgcaaataaacatatattgttgaaaattgagaacctgatcgtgat -tcttatgtgtaccatatgtatagtaatcacgcgactatatagtgctttagtatcgcccgt -gggtgagtgaatattctgggctagcgtgagatagtttcttgtcctaatatttttcagatc -gaatagcttctatttttgtgtttattgacatatgtcgaaactccttactcagtgaaagtc -atgaccagatccacgaacaatcttcggaatcagtctcgttttacggcggaatcttgagtc -taacttatatcccgtcgcttactttctaacaccccttatgtatttttaaaattacgttta -ttcgaacgtacttggcggaagcgttattttttgaagtaagttacattgggcagactcttg -acattttcgatacgactttctttcatccatcacaggactcgttcgtattgatatcagaag -ctcgtgatgattagttgtcttctttaccaatactttgaggcctattctgcgaaatttttg -ttgccctgcgaacttcacataccaaggaacacctcgcaacatgccttcatatccatcgtt -cattgtaattcttacacaatgaatcctaagtaattacatccctgcgtaaaagatggtagg -ggcactgaggatatattaccaagcatttagttatgagtaatcagcaatgtttcttgtatt -aagttctctaaaatagttacatcgtaatgttatctcgggttccgcgaataaacgagatag -attcattatatatggccctaagcaaaaacctcctcgtattctgttggtaattagaatcac -acaatacgggttgagatattaattatttgtagtacgaagagatataaaaagatgaacaat -tactcaagtcaagatgtatacgggatttataataaaaatcgggtagagatctgctttgca -attcagacgtgccactaaatcgtaatatgtcgcgttacatcagaaagggtaactattatt -aattaataaagggcttaatcactacatattagatcttatccgatagtcttatctattcgt -tgtatttttaagcggttctaattcagtcattatatcagtgctccgagttctttattattg -ttttaaggatgacaaaatgcctcttgttataacgctgggagaagcagactaagagtcgga -gcagttggtagaatgaggctgcaaaagacggtctcgacgaatggacagactttactaaac -caatgaaagacagaagtagagcaaagtctgaagtggtatcagcttaattatgacaaccct -taatacttccctttcgccgaatactggcgtggaaaggttttaaaagtcgaagtagttaga -ggcatctctcgctcataaataggtagactactcgcaatccaatgtgactatgtaatactg -ggaacatcagtccgcgatgcagcgtgtttatcaaccgtccccactcgcctggggagacat -gagaccacccccgtggggattattagtccgcagtaatcgactcttgacaatccttttcga -ttatgtcatagcaatttacgacagttcagcgaagtgactactcggcgaaatggtattact -aaagcattcgaacccacatgaatgtgattcttggcaatttctaatccactaaagcttttc -cgttgaatctggttgtagatatttatataagttcactaattaagatcacggtagtatatt -gatagtgatgtctttgcaagaggttggccgaggaatttacggattctctattgatacaat -ttgtctggcttataactcttaaggctgaaccaggcgtttttagacgacttgatcagctgt -tagaatggtttggactccctctttcatgtcagtaacatttcagccgttattgttacgata -tgcttgaacaatattgatctaccacacacccatagtatattttataggtcatgctgttac 
-ctacgagcatggtattccacttcccattcaatgagtattcaacatcactagcctcagaga -tgatgacccacctctaataacgtcacgttgcggccatgtgaaacctgaacttgagtagac -gatatcaagcgctttaaattgcatataacatttgagggtaaagctaagcggatgctttat -ataatcaatactcaataataagatttgattgcattttagagttatgacacgacatagttc -actaacgagttactattcccagatctagactgaagtactgatcgagacgatccttacgtc -gatgatcgttagttatcgacttaggtcgggtctctagcggtattggtacttaaccggaca -ctatactaataacccatgatcaaagcataacagaatacagacgataatttcgccaacata -tatgtacagaccccaagcatgagaagctcattgaaagctatcattgaagtcccgctcaca -atgtgtcttttccagacggtttaactggttcccgggagtcctggagtttcgacttacata -aatggaaacaatgtattttgctaatttatctatagcgtcatttggaccaatacagaatat -tatgttgcctagtaatccactataacccgcaagtgctgatagaaaatttttagacgattt -ataaatgccccaagtatccctcccgtgaatcctccgttatactaattagtattcgttcat -acgtataccgcgcatatatgaacatttggcgataaggcgcgtgaattgttacgtgacaga -gatagcagtttcttgtgatatggttaacagacgtacatgaagggaaactttatatctata -gtgatgcttccgtagaaataccgccactggtctgccaatgatgaagtatgtagctttagg -tttgtactatgaggctttcgtttgtttgcagagtataacagttgcgagtgaaaaaccgac -gaatttatactaatacgctttcactattggctacaaaatagggaagagtttcaatcatga -gagggagtatatggatgctttgtagctaaaggtagaacgtatgtatatgctgccgttcat -tcttgaaagatacataagcgataagttacgacaattataagcaacatccctaccttcgta -acgatttcactgttactgcgcttgaaatacactatggggctattggcggagagaagcaga -tcgcgccgagcatatacgagacctataatgttgatgatagagaaggcgtctgaattgata -catcgaagtacactttctttcgtagtatctctcgtcctctttctatctccggacacaaga -attaagttatatatatagagtcttaccaatcatgttgaatcctgattctcagagttcttt -ggcgggccttgtgatgactgagaaacaatgcaatattgctccaaatttcctaagcaaatt -ctcggttatgttatgttatcagcaaagcgttacgttatgttatttaaatctggaatgacg -gagcgaagttcttatgtcggtgtgggaataattcttttgaagacagcactccttaaataa -tatcgctccgtgtttgtatttatcgaatgggtctgtaaccttgcacaagcaaatcggtgg -tgtatatatcggataacaattaatacgatgttcatagtgacagtatactgatcgagtcct -ctaaagtcaattacctcacttaacaatctcattgatgttgtgtcattcccggtatcgccc -gtagtatgtgctctgattgaccgagtgtgaaccaaggaacatctactaatgcctttgtta -ggtaagatctctctgaattccttcgtgccaacttaaaacattatcaaaatttcttctact -tggattaactacttttacgagcatggcaaattcccctgtggaagacggttcattattatc -ggaaaccttatagaaattgcgtgttgactgaaattagatttttattgtaagagttgcatc -tttgcgattcctctggtctagcttccaatgaacagtcctcccttctattcgacatcgggt -ccttcgtacatgtctttgcgatgtaataattaggttcggagtgtggccttaatgggtgca -actaggaatacaacgcaaatttgctgacatgatagcaaatcggtatgccggcaccaaaac -gtgctccttgcttagcttgtgaatgagactcagtagttaaataaatccatatctgcaatc -gattccacaggtattgtccactatctttgaactactctaagagatacaagcttagctgag -accgaggtgtatatgactacgctgatatctgtaaggtaccaatgcaggcaaagtatgcga -gaagctaataccggctgtttccagctttataagattaaaatttggctgtcctggcggcct -cagaattgttctatcgtaatcagttggttcattaattagctaagtacgaggtacaactta -tctgtcccagaacagctccacaagtttttttacagccgaaacccctgtgtgaatcttaat -atccaagcgcgttatctgattagagtttacaactcagtattttatcagtacgttttgttt -ccaacattacccggtatgacaaaatgacgccacgtgtcgaataatggtctgaccaatgta -ggaagtgaaaagataaatat diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/knucleotide-input0.txt b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/knucleotide-input0.txt deleted file mode 100644 index fd4414b..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/knucleotide-input0.txt +++ /dev/null @@ -1,4171 +0,0 @@ ->ONE Homo sapiens alu -GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGA -TCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACT -AAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAG -GCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCG -CCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGT -GGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCA 
-AGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTC -CGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCG -AGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACC -CCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAG -CTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAG -CCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGG -CCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATC -ACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAA -AAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGC -TGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCC -ACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGG -CTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGG -AGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATT -AGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAA -TCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGC -CTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAA -TCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAG -CCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGT -GGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCG -GGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAG -CGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTG -GGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATG -GTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGT -AATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTT -GCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCT -CAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCG -GGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTC -TCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACT -CGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAG -ATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGG -CGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTG -AGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATA -CAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGG -CAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGC -ACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCAC -GCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTC -GAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCG -GGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCT -TGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGG -CGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCA -GCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGG -CCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGC -GCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGG -CGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGA -CTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGG -CCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAA -ACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCC -CAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGT -GAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAA -AGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGG -ATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTAC -TAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGA -GGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGC -GCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGG -TGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTC -AGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAA -ATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGA 
-GAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCC -AGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTG -TAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGAC -CAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGT -GGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAAC -CCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACA -GAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACT -TTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAAC -ATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCC -TGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAG -GTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCG -TCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAG -GCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCC -GTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCT -ACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCC -GAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCC -GGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCAC -CTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAA -ATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTG -AGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCAC -TGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCT -CACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAG -TTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAG -CCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATC -GCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCT -GGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATC -CCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCC -TGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGG -CGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGG -AGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCG -AGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGG -AGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGT -GAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAA -TCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGC -AGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCA -AAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGG -CGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTC -TACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCG -GGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGAT -CGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCG -CGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAG -GTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACA -AAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCA -GGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCAC -TCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGC -CTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGA -GACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGG -CGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTG -AACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCG -ACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGC -ACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCC -AACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGC -GCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCG -GAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACT -CCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCC -GAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAAC -CCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCA 
-GCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGA -GCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAG -GCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGAT -CACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTA -AAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGG -CTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGC -CACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTG -GCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAG -GAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAAT -TAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGA -ATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAG -CCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTA -ATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCA -GCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGG -TGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCC -GGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGA -GCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTT -GGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACAT -GGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTG -TAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGT -TGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTC -TCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGC -GGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGT -CTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTAC -TCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGA -GATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGG -GCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCT -GAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAAT -ACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAG -GCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTG -CACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCA -CGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTT -CGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCC -GGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGC -TTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGG -GCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCC -AGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTG -GCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCG -CGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAG -GCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAG -ACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAG -GCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGA -AACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATC -CCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAG -TGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAA -AAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCG -GATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTA -CTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGG -AGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCG -CGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCG -GTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGT -CAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAA -AATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGG -AGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTC -CAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCT -GTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGA -CCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCG 
-TGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAA -CCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGAC -AGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCAC -TTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAA -CATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGC -CTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGA -GGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCC -GTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGA -GGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCC -CGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGC -TACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGC -CGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGC -CGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCA -CCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAA -AATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCT -GAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCA -CTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGC -TCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGA -GTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTA -GCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAAT -CGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCC -TGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAAT -CCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGC -CTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTG -GCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGG -GAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGC -GAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGG -GAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGG -TGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTA -ATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTG -CAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTC -AAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGG -GCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCT -CTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTC -GGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGA -TCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGC -GCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGA -GGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATAC -AAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGC -AGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCA -CTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACG -CCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCG -AGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGG -GCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTT -GAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGC -GACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAG -CACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGC -CAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCG -CGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGC -GGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGAC -TCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGC -CGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAA -CCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCC -AGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTG -AGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAA -GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGA -TCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACT 
-AAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAG -GCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCG -CCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGT -GGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCA -GGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAA -TTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAG -AATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCA -GCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGT -AATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACC -AGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTG -GTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACC -CGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAG -AGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTT -TGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACA -TGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCT -GTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGG -TTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGT -CTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGG -CGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCG -TCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTA -CTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCG -AGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCG -GGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACC -TGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAA -TACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGA -GGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACT -GCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTC -ACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGT -TCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGC -CGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCG -CTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTG -GGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCC -CAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCT -GGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGC -GCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGA -GGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGA -GACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGA -GGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTG -AAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAAT -CCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCA -GTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAA -AAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGC -GGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCT -ACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGG -GAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATC -GCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGC -GGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGG -TCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAA -AAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAG -GAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACT -CCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCC -TGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAG -ACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGC -GTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGA -ACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGA -CAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCA -CTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCA 
-ACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCG -CCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGG -AGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTC -CGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCG -AGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACC -CCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAG -CTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAG -CCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGG -CCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATC -ACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAA -AAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGC -TGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCC -ACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGG -CTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGG -AGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATT -AGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAA -TCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGC -CTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAA -TCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAG -CCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGT -GGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCG -GGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAG -CGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTG -GGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATG -GTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGT -AATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTT -GCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCT -CAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCG -GGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTC -TCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACT -CGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAG -ATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGG -CGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTG -AGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATA -CAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGG -CAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGC -ACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCAC -GCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTC -GAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCG -GGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCT -TGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGG -CGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCA -GCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGG -CCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGC -GCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGG -CGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGA -CTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGG -CCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAA -ACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCC -CAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGT -GAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAA -AGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGG -ATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTAC -TAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGA -GGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGC -GCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGG -TGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTC 
-AGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAA -ATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGA -GAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCC -AGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTG -TAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGAC -CAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGT -GGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAAC -CCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACA -GAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACT -TTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAAC -ATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCC -TGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAG -GTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCG -TCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAG -GCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCC -GTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCT -ACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCC -GAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCC -GGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCAC -CTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAA -ATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTG -AGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCAC -TGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCT -CACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAG -TTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAG -CCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATC -GCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCT -GGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATC -CCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCC -TGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGG -CGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGG -AGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCG -AGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGG -AGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGT -GAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAA -TCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGC -AGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCA -AAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGG -CGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTC -TACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCG -GGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGAT -CGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCG -CGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAG -GTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACA -AAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCA -GGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCAC -TCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGC -CTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGA -GACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGG -CGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTG -AACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCG -ACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGC -ACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCC -AACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGC -GCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCG -GAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACT -CCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCC 
-GAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAAC -CCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCA -GCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGA -GCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAG -GCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGAT -CACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTA -AAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGG -CTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGC -CACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTG -GCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAG -GAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAAT -TAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGA -ATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAG -CCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTA -ATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCA -GCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGG -TGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCC -GGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGA -GCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTT -GGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACAT -GGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTG -TAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGT -TGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTC -TCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGC -GGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGT -CTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTAC -TCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGA -GATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGG -GCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCT -GAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAAT -ACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAG -GCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTG -CACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCA -CGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTT -CGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCC -GGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGC -TTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGG -GCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCC -AGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTG -GCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCG -CGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAG -GCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAG -ACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAG -GCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGA -AACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATC -CCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAG -TGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAA -AAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCG -GATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTA -CTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGG -AGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCG -CGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCG -GTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGT -CAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAA -AATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGG -AGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTC -CAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCT 
-GTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGA -CCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCG -TGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAA -CCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGAC -AGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCAC -TTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAA -CATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGC -CTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGA -GGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCC -GTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGA -GGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCC -CGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGC -TACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGC -CGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGC -CGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCA -CCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAA -AATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCT -GAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCA -CTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGC -TCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGA -GTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTA -GCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAAT -CGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCC -TGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAAT -CCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGACCAGC -CTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAATTAGCCGGGCGTGGTG -GCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGG -GAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGC -GAGACTCCGTCTCAAAAAGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGG -GAGGCCGAGGCGGGCGGATC ->TWO IUB ambiguity codes -cttBtatcatatgctaKggNcataaaSatgtaaaDcDRtBggDtctttataattcBgtcg -tactDtDagcctatttSVHtHttKtgtHMaSattgWaHKHttttagacatWatgtRgaaa -NtactMcSMtYtcMgRtacttctWBacgaaatatagScDtttgaagacacatagtVgYgt -cattHWtMMWcStgttaggKtSgaYaaccWStcgBttgcgaMttBYatcWtgacaYcaga -gtaBDtRacttttcWatMttDBcatWtatcttactaBgaYtcttgttttttttYaaScYa -HgtgttNtSatcMtcVaaaStccRcctDaataataStcYtRDSaMtDttgttSagtRRca -tttHatSttMtWgtcgtatSSagactYaaattcaMtWatttaSgYttaRgKaRtccactt -tattRggaMcDaWaWagttttgacatgttctacaaaRaatataataaMttcgDacgaSSt -acaStYRctVaNMtMgtaggcKatcttttattaaaaagVWaHKYagtttttatttaacct -tacgtVtcVaattVMBcttaMtttaStgacttagattWWacVtgWYagWVRctDattBYt -gtttaagaagattattgacVatMaacattVctgtBSgaVtgWWggaKHaatKWcBScSWa -accRVacacaaactaccScattRatatKVtactatatttHttaagtttSKtRtacaaagt -RDttcaaaaWgcacatWaDgtDKacgaacaattacaRNWaatHtttStgttattaaMtgt -tgDcgtMgcatBtgcttcgcgaDWgagctgcgaggggVtaaScNatttacttaatgacag -cccccacatYScaMgtaggtYaNgttctgaMaacNaMRaacaaacaKctacatagYWctg -ttWaaataaaataRattagHacacaagcgKatacBttRttaagtatttccgatctHSaat -actcNttMaagtattMtgRtgaMgcataatHcMtaBSaRattagttgatHtMttaaKagg -YtaaBataSaVatactWtataVWgKgttaaaacagtgcgRatatacatVtHRtVYataSa -KtWaStVcNKHKttactatccctcatgWHatWaRcttactaggatctataDtDHBttata -aaaHgtacVtagaYttYaKcctattcttcttaataNDaaggaaaDYgcggctaaWSctBa -aNtgctggMBaKctaMVKagBaactaWaDaMaccYVtNtaHtVWtKgRtcaaNtYaNacg -gtttNattgVtttctgtBaWgtaattcaagtcaVWtactNggattctttaYtaaagccgc -tcttagHVggaYtgtNcDaVagctctctKgacgtatagYcctRYHDtgBattDaaDgccK -tcHaaStttMcctagtattgcRgWBaVatHaaaataYtgtttagMDMRtaataaggatMt -ttctWgtNtgtgaaaaMaatatRtttMtDgHHtgtcattttcWattRSHcVagaagtacg -ggtaKVattKYagactNaatgtttgKMMgYNtcccgSKttctaStatatNVataYHgtNa -BKRgNacaactgatttcctttaNcgatttctctataScaHtataRagtcRVttacDSDtt 
-aRtSatacHgtSKacYagttMHtWataggatgactNtatSaNctataVtttRNKtgRacc -tttYtatgttactttttcctttaaacatacaHactMacacggtWataMtBVacRaSaatc -cgtaBVttccagccBcttaRKtgtgcctttttRtgtcagcRttKtaaacKtaaatctcac -aattgcaNtSBaaccgggttattaaBcKatDagttactcttcattVtttHaaggctKKga -tacatcBggScagtVcacattttgaHaDSgHatRMaHWggtatatRgccDttcgtatcga -aacaHtaagttaRatgaVacttagattVKtaaYttaaatcaNatccRttRRaMScNaaaD -gttVHWgtcHaaHgacVaWtgttScactaagSgttatcttagggDtaccagWattWtRtg -ttHWHacgattBtgVcaYatcggttgagKcWtKKcaVtgaYgWctgYggVctgtHgaNcV -taBtWaaYatcDRaaRtSctgaHaYRttagatMatgcatttNattaDttaattgttctaa -ccctcccctagaWBtttHtBccttagaVaatMcBHagaVcWcagBVttcBtaYMccagat -gaaaaHctctaacgttagNWRtcggattNatcRaNHttcagtKttttgWatWttcSaNgg -gaWtactKKMaacatKatacNattgctWtatctaVgagctatgtRaHtYcWcttagccaa -tYttWttaWSSttaHcaaaaagVacVgtaVaRMgattaVcDactttcHHggHRtgNcctt -tYatcatKgctcctctatVcaaaaKaaaagtatatctgMtWtaaaacaStttMtcgactt -taSatcgDataaactaaacaagtaaVctaggaSccaatMVtaaSKNVattttgHccatca -cBVctgcaVatVttRtactgtVcaattHgtaaattaaattttYtatattaaRSgYtgBag -aHSBDgtagcacRHtYcBgtcacttacactaYcgctWtattgSHtSatcataaatataHt -cgtYaaMNgBaatttaRgaMaatatttBtttaaaHHKaatctgatWatYaacttMctctt -ttVctagctDaaagtaVaKaKRtaacBgtatccaaccactHHaagaagaaggaNaaatBW -attccgStaMSaMatBttgcatgRSacgttVVtaaDMtcSgVatWcaSatcttttVatag -ttactttacgatcaccNtaDVgSRcgVcgtgaacgaNtaNatatagtHtMgtHcMtagaa -attBgtataRaaaacaYKgtRccYtatgaagtaataKgtaaMttgaaRVatgcagaKStc -tHNaaatctBBtcttaYaBWHgtVtgacagcaRcataWctcaBcYacYgatDgtDHccta -aagacYRcaggattHaYgtKtaatgcVcaataMYacccatatcacgWDBtgaatcBaata -cKcttRaRtgatgaBDacggtaattaaYtataStgVHDtDctgactcaaatKtacaatgc -gYatBtRaDatHaactgtttatatDttttaaaKVccYcaaccNcBcgHaaVcattHctcg -attaaatBtatgcaaaaatYMctSactHatacgaWacattacMBgHttcgaatVaaaaca -BatatVtctgaaaaWtctRacgBMaatSgRgtgtcgactatcRtattaScctaStagKga -DcWgtYtDDWKRgRtHatRtggtcgaHgggcgtattaMgtcagccaBggWVcWctVaaat -tcgNaatcKWagcNaHtgaaaSaaagctcYctttRVtaaaatNtataaccKtaRgtttaM -tgtKaBtRtNaggaSattHatatWactcagtgtactaKctatttgRYYatKatgtccgtR -tttttatttaatatVgKtttgtatgtNtataRatWYNgtRtHggtaaKaYtKSDcatcKg -taaYatcSRctaVtSMWtVtRWHatttagataDtVggacagVcgKWagBgatBtaaagNc -aRtagcataBggactaacacRctKgttaatcctHgDgttKHHagttgttaatgHBtatHc -DaagtVaBaRccctVgtgDtacRHSctaagagcggWYaBtSaKtHBtaaactYacgNKBa -VYgtaacttagtVttcttaatgtBtatMtMtttaattaatBWccatRtttcatagVgMMt -agctStKctaMactacDNYgKYHgaWcgaHgagattacVgtttgtRaSttaWaVgataat -gtgtYtaStattattMtNgWtgttKaccaatagNYttattcgtatHcWtctaaaNVYKKt -tWtggcDtcgaagtNcagatacgcattaagaccWctgcagcttggNSgaNcHggatgtVt -catNtRaaBNcHVagagaaBtaaSggDaatWaatRccaVgggStctDaacataKttKatt -tggacYtattcSatcttagcaatgaVBMcttDattctYaaRgatgcattttNgVHtKcYR -aatRKctgtaaacRatVSagctgtWacBtKVatctgttttKcgtctaaDcaagtatcSat -aWVgcKKataWaYttcccSaatgaaaacccWgcRctWatNcWtBRttYaattataaNgac -acaatagtttVNtataNaYtaatRaVWKtBatKagtaatataDaNaaaaataMtaagaaS -tccBcaatNgaataWtHaNactgtcDtRcYaaVaaaaaDgtttRatctatgHtgttKtga -aNSgatactttcgagWaaatctKaaDaRttgtggKKagcDgataaattgSaacWaVtaNM -acKtcaDaaatttctRaaVcagNacaScRBatatctRatcctaNatWgRtcDcSaWSgtt -RtKaRtMtKaatgttBHcYaaBtgatSgaSWaScMgatNtctcctatttctYtatMatMt -RRtSaattaMtagaaaaStcgVgRttSVaScagtgDtttatcatcatacRcatatDctta -tcatVRtttataaHtattcYtcaaaatactttgVctagtaaYttagatagtSYacKaaac -gaaKtaaatagataatSatatgaaatSgKtaatVtttatcctgKHaatHattagaaccgt -YaaHactRcggSBNgtgctaaBagBttgtRttaaattYtVRaaaattgtaatVatttctc -ttcatgBcVgtgKgaHaaatattYatagWacNctgaaMcgaattStagWaSgtaaKagtt -ttaagaDgatKcctgtaHtcatggKttVDatcaaggtYcgccagNgtgcVttttagagat -gctaccacggggtNttttaSHaNtatNcctcatSaaVgtactgBHtagcaYggYVKNgta -KBcRttgaWatgaatVtagtcgattYgatgtaatttacDacSctgctaaaStttaWMagD -aaatcaVYctccgggcgaVtaaWtStaKMgDtttcaaMtVgBaatccagNaaatcYRMBg 
-gttWtaaScKttMWtYataRaDBMaDataatHBcacDaaKDactaMgagttDattaHatH -taYatDtattDcRNStgaatattSDttggtattaaNSYacttcDMgYgBatWtaMagact -VWttctttgYMaYaacRgHWaattgRtaagcattctMKVStatactacHVtatgatcBtV -NataaBttYtSttacKgggWgYDtgaVtYgatDaacattYgatggtRDaVDttNactaSa -MtgNttaacaaSaBStcDctaccacagacgcaHatMataWKYtaYattMcaMtgSttDag -cHacgatcaHttYaKHggagttccgatYcaatgatRaVRcaagatcagtatggScctata -ttaNtagcgacgtgKaaWaactSgagtMYtcttccaKtStaacggMtaagNttattatcg -tctaRcactctctDtaacWYtgaYaSaagaWtNtatttRacatgNaatgttattgWDDcN -aHcctgaaHacSgaataaRaataMHttatMtgaSDSKatatHHaNtacagtccaYatWtc -actaactatKDacSaStcggataHgYatagKtaatKagStaNgtatactatggRHacttg -tattatgtDVagDVaRctacMYattDgtttYgtctatggtKaRSttRccRtaaccttaga -gRatagSaaMaacgcaNtatgaaatcaRaagataatagatactcHaaYKBctccaagaRa -BaStNagataggcgaatgaMtagaatgtcaKttaaatgtaWcaBttaatRcggtgNcaca -aKtttScRtWtgcatagtttWYaagBttDKgcctttatMggNttattBtctagVtacata -aaYttacacaaRttcYtWttgHcaYYtaMgBaBatctNgcDtNttacgacDcgataaSat -YaSttWtcctatKaatgcagHaVaacgctgcatDtgttaSataaaaYSNttatagtaNYt -aDaaaNtggggacttaBggcHgcgtNtaaMcctggtVtaKcgNacNtatVaSWctWtgaW -cggNaBagctctgaYataMgaagatBSttctatacttgtgtKtaattttRagtDtacata -tatatgatNHVgBMtKtaKaNttDHaagatactHaccHtcatttaaagttVaMcNgHata -tKtaNtgYMccttatcaaNagctggacStttcNtggcaVtattactHaSttatgNMVatt -MMDtMactattattgWMSgtHBttStStgatatRaDaagattttctatMtaaaaaggtac -taaVttaSacNaatactgMttgacHaHRttgMacaaaatagttaatatWKRgacDgaRta -tatttattatcYttaWtgtBRtWatgHaaattHataagtVaDtWaVaWtgStcgtMSgaS -RgMKtaaataVacataatgtaSaatttagtcgaaHtaKaatgcacatcggRaggSKctDc -agtcSttcccStYtccRtctctYtcaaKcgagtaMttttcRaYDttgttatctaatcata -NctctgctatcaMatactataggDaHaaSttMtaDtcNatataattctMcStaaBYtaNa -gatgtaatHagagSttgWHVcttatKaYgDctcttggtgttMcRaVgSgggtagacaata -aDtaattSaDaNaHaBctattgNtaccaaRgaVtKNtaaYggHtaKKgHcatctWtctDt -ttctttggSDtNtaStagttataaacaattgcaBaBWggHgcaaaBtYgctaatgaaatW -cDcttHtcMtWWattBHatcatcaaatctKMagtDNatttWaBtHaaaNgMttaaStagt -tctctaatDtcRVaYttgttMtRtgtcaSaaYVgSWDRtaatagctcagDgcWWaaaBaa -RaBctgVgggNgDWStNaNBKcBctaaKtttDcttBaaggBttgaccatgaaaNgttttt -tttatctatgttataccaaDRaaSagtaVtDtcaWatBtacattaWacttaSgtattggD -gKaaatScaattacgWcagKHaaccaYcRcaRttaDttRtttHgaHVggcttBaRgtccc -tDatKaVtKtcRgYtaKttacgtatBtStaagcaattaagaRgBagSaattccSWYttta -ttVaataNctgHgttaaNBgcVYgtRtcccagWNaaaacaDNaBcaaaaRVtcWMgBagM -tttattacgDacttBtactatcattggaaatVccggttRttcatagttVYcatYaSHaHc -ttaaagcNWaHataaaRWtctVtRYtagHtaaaYMataHYtNBctNtKaatattStgaMc -BtRgctaKtgcScSttDgYatcVtggaaKtaagatWccHccgKYctaNNctacaWctttt -gcRtgtVcgaKttcMRHgctaHtVaataaDtatgKDcttatBtDttggNtacttttMtga -acRattaaNagaactcaaaBBVtcDtcgaStaDctgaaaSgttMaDtcgttcaccaaaag -gWtcKcgSMtcDtatgtttStaaBtatagDcatYatWtaaaBacaKgcaDatgRggaaYc -taRtccagattDaWtttggacBaVcHtHtaacDacYgtaatataMagaatgHMatcttat -acgtatttttatattacHactgttataMgStYaattYaccaattgagtcaaattaYtgta -tcatgMcaDcgggtcttDtKgcatgWRtataatatRacacNRBttcHtBgcRttgtgcgt -catacMtttBctatctBaatcattMttMYgattaaVYatgDaatVagtattDacaacDMa -tcMtHcccataagatgBggaccattVWtRtSacatgctcaaggggYtttDtaaNgNtaaB -atggaatgtctRtaBgBtcNYatatNRtagaacMgagSaSDDSaDcctRagtVWSHtVSR -ggaacaBVaccgtttaStagaacaMtactccagtttVctaaRaaHttNcttagcaattta -ttaatRtaaaatctaacDaBttggSagagctacHtaaRWgattcaaBtctRtSHaNtgta -cattVcaHaNaagtataccacaWtaRtaaVKgMYaWgttaKggKMtKcgWatcaDatYtK -SttgtacgaccNctSaattcDcatcttcaaaDKttacHtggttHggRRaRcaWacaMtBW -VHSHgaaMcKattgtaRWttScNattBBatYtaNRgcggaagacHSaattRtttcYgacc -BRccMacccKgatgaacttcgDgHcaaaaaRtatatDtatYVtttttHgSHaSaatagct -NYtaHYaVYttattNtttgaaaYtaKttWtctaNtgagaaaNctNDctaaHgttagDcRt -tatagccBaacgcaRBtRctRtggtaMYYttWtgataatcgaataattattataVaaaaa -ttacNRVYcaaMacNatRttcKatMctgaagactaattataaYgcKcaSYaatMNctcaa 
-cgtgatttttBacNtgatDccaattattKWWcattttatatatgatBcDtaaaagttgaa -VtaHtaHHtBtataRBgtgDtaataMttRtDgDcttattNtggtctatctaaBcatctaR -atgNacWtaatgaagtcMNaacNgHttatactaWgcNtaStaRgttaaHacccgaYStac -aaaatWggaYaWgaattattcMaactcBKaaaRVNcaNRDcYcgaBctKaacaaaaaSgc -tccYBBHYaVagaatagaaaacagYtctVccaMtcgtttVatcaatttDRtgWctagtac -RttMctgtDctttcKtWttttataaatgVttgBKtgtKWDaWagMtaaagaaattDVtag -gttacatcatttatgtcgMHaVcttaBtVRtcgtaYgBRHatttHgaBcKaYWaatcNSc -tagtaaaaatttacaatcactSWacgtaatgKttWattagttttNaggtctcaagtcact -attcttctaagKggaataMgtttcataagataaaaatagattatDgcBVHWgaBKttDgc -atRHaagcaYcRaattattatgtMatatattgHDtcaDtcaaaHctStattaatHaccga -cNattgatatattttgtgtDtRatagSacaMtcRtcattcccgacacSattgttKaWatt -NHcaacttccgtttSRtgtctgDcgctcaaMagVtBctBMcMcWtgtaacgactctcttR -ggRKSttgYtYatDccagttDgaKccacgVatWcataVaaagaataMgtgataaKYaaat -cHDaacgataYctRtcYatcgcaMgtNttaBttttgatttaRtStgcaacaaaataccVg -aaDgtVgDcStctatatttattaaaaRKDatagaaagaKaaYYcaYSgKStctccSttac -agtcNactttDVttagaaagMHttRaNcSaRaMgBttattggtttaRMggatggcKDgWR -tNaataataWKKacttcKWaaagNaBttaBatMHtccattaacttccccYtcBcYRtaga -ttaagctaaYBDttaNtgaaaccHcaRMtKtaaHMcNBttaNaNcVcgVttWNtDaBatg -ataaVtcWKcttRggWatcattgaRagHgaattNtatttctctattaattaatgaDaaMa -tacgttgggcHaYVaaNaDDttHtcaaHtcVVDgBVagcMacgtgttaaBRNtatRtcag -taagaggtttaagacaVaaggttaWatctccgtVtaDtcDatttccVatgtacNtttccg -tHttatKgScBatgtVgHtYcWagcaKtaMYaaHgtaattaSaHcgcagtWNaatNccNN -YcacgVaagaRacttctcattcccRtgtgtaattagcSttaaStWaMtctNNcSMacatt -ataaactaDgtatWgtagtttaagaaaattgtagtNagtcaataaatttgatMMYactaa -tatcggBWDtVcYttcDHtVttatacYaRgaMaacaStaatcRttttVtagaDtcacWat -ttWtgaaaagaaagNRacDtttStVatBaDNtaactatatcBSMcccaSttccggaMatg -attaaWatKMaBaBatttgataNctgttKtVaagtcagScgaaaDggaWgtgttttKtWt -atttHaatgtagttcactaaKMagttSYBtKtaYgaactcagagRtatagtVtatcaaaW -YagcgNtaDagtacNSaaYDgatBgtcgataacYDtaaactacagWDcYKaagtttatta -gcatcgagttKcatDaattgattatDtcagRtWSKtcgNtMaaaaacaMttKcaWcaaSV -MaaaccagMVtaMaDtMaHaBgaacataBBVtaatVYaNSWcSgNtDNaaKacacBttta -tKtgtttcaaHaMctcagtaacgtcgYtactDcgcctaNgagagcYgatattttaaattt -ccattttacatttDaaRctattttWctttacgtDatYtttcagacgcaaVttagtaaKaa -aRtgVtccataBggacttatttgtttaWNtgttVWtaWNVDaattgtatttBaagcBtaa -BttaaVatcHcaVgacattccNggtcgacKttaaaRtagRtctWagaYggtgMtataatM -tgaaRttattttgWcttNtDRRgMDKacagaaaaggaaaRStcccagtYccVattaNaaK -StNWtgacaVtagaagcttSaaDtcacaacgDYacWDYtgtttKatcVtgcMaDaSKStV -cgtagaaWaKaagtttcHaHgMgMtctataagBtKaaaKKcactggagRRttaagaBaaN -atVVcgRcKSttDaactagtSttSattgttgaaRYatggttVttaataaHttccaagDtg -atNWtaagHtgcYtaactRgcaatgMgtgtRaatRaNaacHKtagactactggaatttcg -ccataacgMctRgatgttaccctaHgtgWaYcactcacYaattcttaBtgacttaaacct -gYgaWatgBttcttVttcgttWttMcNYgtaaaatctYgMgaaattacNgaHgaacDVVM -tttggtHtctaaRgtacagacgHtVtaBMNBgattagcttaRcttacaHcRctgttcaaD -BggttKaacatgKtttYataVaNattccgMcgcgtagtRaVVaattaKaatggttRgaMc -agtatcWBttNtHagctaatctagaaNaaacaYBctatcgcVctBtgcaaagDgttVtga -HtactSNYtaaNccatgtgDacgaVtDcgKaRtacDcttgctaagggcagMDagggtBWR -tttSgccttttttaacgtcHctaVtVDtagatcaNMaVtcVacatHctDWNaataRgcgt -aVHaggtaaaaSgtttMtattDgBtctgatSgtRagagYtctSaKWaataMgattRKtaa -catttYcgtaacacattRWtBtcggtaaatMtaaacBatttctKagtcDtttgcBtKYYB -aKttctVttgttaDtgattttcttccacttgSaaacggaaaNDaattcYNNaWcgaaYat -tttMgcBtcatRtgtaaagatgaWtgaccaYBHgaatagataVVtHtttVgYBtMctaMt -cctgaDcYttgtccaaaRNtacagcMctKaaaggatttacatgtttaaWSaYaKttBtag -DacactagctMtttNaKtctttcNcSattNacttggaacaatDagtattRtgSHaataat -gccVgacccgatactatccctgtRctttgagaSgatcatatcgDcagWaaHSgctYYWta -tHttggttctttatVattatcgactaagtgtagcatVgtgHMtttgtttcgttaKattcM -atttgtttWcaaStNatgtHcaaaDtaagBaKBtRgaBgDtSagtatMtaacYaatYtVc -KatgtgcaacVaaaatactKcRgtaYtgtNgBBNcKtcttaccttKgaRaYcaNKtactt 
-tgagSBtgtRagaNgcaaaNcacagtVtttHWatgttaNatBgtttaatNgVtctgaata -tcaRtattcttttttttRaaKcRStctcggDgKagattaMaaaKtcaHacttaataataK -taRgDtKVBttttcgtKaggHHcatgttagHggttNctcgtatKKagVagRaaaggaaBt -NatttVKcRttaHctaHtcaaatgtaggHccaBataNaNaggttgcWaatctgatYcaaa -HaatWtaVgaaBttagtaagaKKtaaaKtRHatMaDBtBctagcatWtatttgWttVaaa -ScMNattRactttgtYtttaaaagtaagtMtaMaSttMBtatgaBtttaKtgaatgagYg -tNNacMtcNRacMMHcttWtgtRtctttaacaacattattcYaMagBaacYttMatcttK -cRMtgMNccattaRttNatHaHNaSaaHMacacaVaatacaKaSttHatattMtVatWga -ttttttaYctttKttHgScWaacgHtttcaVaaMgaacagNatcgttaacaaaaagtaca -HBNaattgttKtcttVttaaBtctgctacgBgcWtttcaggacacatMgacatcccagcg -gMgaVKaBattgacttaatgacacacaaaaaatRKaaBctacgtRaDcgtagcVBaacDS -BHaaaaSacatatacagacRNatcttNaaVtaaaataHattagtaaaaSWccgtatWatg -gDttaactattgcccatcttHaSgYataBttBaactattBtcHtgatcaataSttaBtat -KSHYttWggtcYtttBttaataccRgVatStaHaKagaatNtagRMNgtcttYaaSaact -cagDSgagaaYtMttDtMRVgWKWtgMaKtKaDttttgactatacataatcNtatNaHat -tVagacgYgatatatttttgtStWaaatctWaMgagaRttRatacgStgattcttaagaD -taWccaaatRcagcagaaNKagtaaDggcgccBtYtagSBMtactaaataMataBSacRM -gDgattMMgtcHtcaYDtRaDaacggttDaggcMtttatgttaNctaattaVacgaaMMt -aatDccSgtattgaRtWWaccaccgagtactMcgVNgctDctaMScatagcgtcaactat -acRacgHRttgctatttaatgaattataYKttgtaagWgtYttgcHgMtaMattWaWVta -RgcttgYgttBHtYataSccStBtgtagMgtDtggcVaaSBaatagDttgBgtctttctc -attttaNagtHKtaMWcYactVcgcgtatMVtttRacVagDaatcttgctBBcRDgcaac -KttgatSKtYtagBMagaRtcgBattHcBWcaactgatttaatttWDccatttatcgagS -KaWttataHactaHMttaatHtggaHtHagaatgtKtaaRactgtttMatacgatcaagD -gatKaDctataMggtHDtggHacctttRtatcttYattttgacttgaaSaataaatYcgB -aaaaccgNatVBttMacHaKaataagtatKgtcaagactcttaHttcggaattgttDtct -aaccHttttWaaatgaaatataaaWattccYDtKtaaaacggtgaggWVtctattagtga -ctattaagtMgtttaagcatttgSgaaatatccHaaggMaaaattttcWtatKctagDtY -tMcctagagHcactttactatacaaacattaacttaHatcVMYattYgVgtMttaaRtga -aataaDatcaHgtHHatKcDYaatcttMtNcgatYatgSaMaNtcttKcWataScKggta -tcttacgcttWaaagNatgMgHtctttNtaacVtgttcMaaRatccggggactcMtttaY -MtcWRgNctgNccKatcttgYDcMgattNYaRagatHaaHgKctcataRDttacatBatc -cattgDWttatttaWgtcggagaaaaatacaatacSNtgggtttccttacSMaagBatta -caMaNcactMttatgaRBacYcYtcaaaWtagctSaacttWgDMHgaggatgBVgcHaDt -ggaactttggtcNatNgtaKaBcccaNtaagttBaacagtatacDYttcctNgWgcgSMc -acatStctHatgRcNcgtacacaatRttMggaNKKggataaaSaYcMVcMgtaMaHtgat -tYMatYcggtcttcctHtcDccgtgRatcattgcgccgatatMaaYaataaYSggatagc -gcBtNtaaaScaKgttBgagVagttaKagagtatVaactaSacWactSaKatWccaKaaa -atBKgaaKtDMattttgtaaatcRctMatcaaMagMttDgVatggMaaWgttcgaWatga -aatttgRtYtattaWHKcRgctacatKttctaccaaHttRatctaYattaaWatVNccat -NgagtcKttKataStRaatatattcctRWatDctVagttYDgSBaatYgttttgtVaatt -taatagcagMatRaacttBctattgtMagagattaaactaMatVtHtaaatctRgaaaaa -aaatttWacaacaYccYDSaattMatgaccKtaBKWBattgtcaagcHKaagttMMtaat -ttcKcMagNaaKagattggMagaggtaatttYacatcWaaDgatMgKHacMacgcVaaca -DtaDatatYggttBcgtatgWgaSatttgtagaHYRVacaRtctHaaRtatgaactaata -tctSSBgggaaHMWtcaagatKgagtDaSatagttgattVRatNtctMtcSaagaSHaat -aNataataRaaRgattctttaataaagWaRHcYgcatgtWRcttgaaggaMcaataBRaa -ccagStaaacNtttcaatataYtaatatgHaDgcStcWttaacctaRgtYaRtataKtgM -ttttatgactaaaatttacYatcccRWtttHRtattaaatgtttatatttgttYaatMca -RcSVaaDatcgtaYMcatgtagacatgaaattgRtcaaYaaYtRBatKacttataccaNa -aattVaBtctggacaagKaaYaaatatWtMtatcYaaVNtcgHaactBaagKcHgtctac -aatWtaDtSgtaHcataHtactgataNctRgttMtDcDttatHtcgtacatcccaggStt -aBgtcacacWtccNMcNatMVaVgtccDYStatMaccDatggYaRKaaagataRatttHK -tSaaatDgataaacttaHgttgVBtcttVttHgDacgaKatgtatatNYataactctSat -atatattgcHRRYttStggaactHgttttYtttaWtatMcttttctatctDtagVHYgMR -BgtHttcctaatYRttKtaagatggaVRataKDctaMtKBNtMtHNtWtttYcVtattMc -gRaacMcctNSctcatttaaagDcaHtYccSgatgcaatYaaaaDcttcgtaWtaattct 
-cgttttScttggtaatctttYgtctaactKataHacctMctcttacHtKataacacagcN -RatgKatttttSaaatRYcgDttaMRcgaaattactMtgcgtaagcgttatBtttttaat -taagtNacatHgttcRgacKcBBtVgatKttcgaBaatactDRgtRtgaNacWtcacYtt -aaKcgttctHaKttaNaMgWgWaggtctRgaKgWttSttBtDcNtgtttacaaatYcDRt -gVtgcctattcNtctaaaDMNttttNtggctgagaVctDaacVtWccaagtaacacaNct -gaScattccDHcVBatcgatgtMtaatBgHaatDctMYgagaatgYWKcctaatNaStHa -aaKccgHgcgtYaaYtattgtStgtgcaaRtattaKatattagaWVtcaMtBagttatta -gNaWHcVgcaattttDcMtgtaRHVYtHtctgtaaaaHVtMKacatcgNaatttMatatg -ttgttactagWYtaRacgataKagYNKcattataNaRtgaacKaYgcaaYYacaNccHat -MatDcNgtHttRaWttagaaDcaaaaaatagggtKDtStaDaRtaVtHWKNtgtattVct -SVgRgataDaRaWataBgaagaaKtaataaYgDcaStaNgtaDaaggtattHaRaWMYaY -aWtggttHYgagVtgtgcttttcaaDKcagVcgttagacNaaWtagtaataDttctggtt -VcatcataaagtgKaaaNaMtaBBaattaatWaattgctHaVKaSgDaaVKaHtatatat -HatcatSBagNgHtatcHYMHgttDgtaHtBttWatcgtttaRaattgStKgSKNWKatc -agDtctcagatttctRtYtBatBgHHtKaWtgYBgacVVWaKtacKcDttKMaKaVcggt -gttataagaataaHaatattagtataatMHgttYgaRttagtaRtcaaVatacggtcMcg -agtaaRttacWgactKRYataaaagSattYaWgagatYagKagatgSaagKgttaatMgg -tataatgttWYttatgagaaacctNVataatHcccKtDctcctaatactggctHggaSag -gRtKHaWaattcgSatMatttagaggcYtctaMcgctcataSatatgRagacNaaDagga -VBagaYttKtacNaKgtSYtagttggaWcatcWttaatctatgaVtcgtgtMtatcaYcg -tRccaaYgDctgcMgtgtWgacWtgataacacgcgctBtgttaKtYDtatDcatcagKaV -MctaatcttgVcaaRgcRMtDcgattaHttcaNatgaatMtactacVgtRgatggaWttt -actaaKatgagSaaKggtaNtactVaYtaaKRagaacccacaMtaaMtKtatBcttgtaa -WBtMctaataaVcDaaYtcRHBtcgttNtaaHatttBNgRStVDattBatVtaagttaYa -tVattaagaBcacggtSgtVtatttaRattgatgtaHDKgcaatattKtggcctatgaWD -KRYcggattgRctatNgatacaatMNttctgtcRBYRaaaHctNYattcHtaWcaattct -BtMKtVgYataatMgYtcagcttMDataVtggRtKtgaatgccNcRttcaMtRgattaac -attRcagcctHtWMtgtDRagaKaBtgDttYaaaaKatKgatctVaaYaacWcgcatagB -VtaNtRtYRaggBaaBtgKgttacataagagcatgtRattccacttaccatRaaatgWgD -aMHaYVgVtaSctatcgKaatatattaDgacccYagtgtaYNaaatKcagtBRgagtcca -tgKgaaaccBgaagBtgSttWtacgatWHaYatcgatttRaaNRgcaNaKVacaNtDgat -tgHVaatcDaagcgtatgcNttaDataatcSataaKcaataaHWataBtttatBtcaKtK -tatagttaDgSaYctacaRatNtaWctSaatatttYaKaKtaccWtatcRagacttaYtt -VcKgSDcgagaagatccHtaattctSttatggtKYgtMaHagVaBRatttctgtRgtcta -tgggtaHKgtHacHtSYacgtacacHatacKaaBaVaccaDtatcSaataaHaagagaat -ScagactataaRttagcaaVcaHataKgDacatWccccaagcaBgagWatctaYttgaaa -tctVNcYtttWagHcgcgcDcVaaatgttKcHtNtcaatagtgtNRaactttttcaatgg -WgBcgDtgVgtttctacMtaaataaaRggaaacWaHttaRtNtgctaaRRtVBctYtVta -tDcattDtgaccYatagatYRKatNYKttNgcctagtaWtgaactaMVaacctgaStttc -tgaKVtaaVaRKDttVtVctaDNtataaaDtccccaagtWtcgatcactDgYaBcatcct -MtVtacDaaBtYtMaKNatNtcaNacgDatYcatcgcaRatWBgaacWttKttagYtaat -tcggttgSWttttDWctttacYtatatWtcatDtMgtBttgRtVDggttaacYtacgtac -atgaattgaaWcttMStaDgtatattgaDtcRBcattSgaaVBRgagccaaKtttcDgcg -aSMtatgWattaKttWtgDBMaggBBttBaatWttRtgcNtHcgttttHtKtcWtagHSt -aacagttgatatBtaWSaWggtaataaMttaKacDaatactcBttcaatatHttcBaaSa -aatYggtaRtatNtHcaatcaHtagVtgtattataNggaMtcttHtNagctaaaggtaga -YctMattNaMVNtcKtactBKcaHHcBttaSagaKacataYgctaKaYgttYcgacWVtt -WtSagcaacatcccHaccKtcttaacgaKttcacKtNtacHtatatRtaaatacactaBt -ttgaHaRttggttWtatYagcatYDatcggagagcWBataagRtacctataRKgtBgatg -aDatataSttagBaHtaatNtaDWcWtgtaattacagKttcNtMagtattaNgtctcgtc -ctcttBaHaKcKccgtRcaaYagSattaagtKataDatatatagtcDtaacaWHcaKttD -gaaRcgtgYttgtcatatNtatttttatggccHtgDtYHtWgttatYaacaattcaWtat -NgctcaaaSttRgctaatcaaatNatcgtttaBtNNVtgttataagcaaagattBacgtD -atttNatttaaaDcBgtaSKgacgtagataatttcHMVNttgttBtDtgtaWKaaRMcKM -tHtaVtagataWctccNNaSWtVaHatctcMgggDgtNHtDaDttatatVWttgttattt -aacctttcacaaggaSaDcggttttttatatVtctgVtaacaStDVaKactaMtttaSNa -gtgaaattaNacttSKctattcctctaSagKcaVttaagNaVcttaVaaRNaHaaHttat 
-gtHttgtgatMccaggtaDcgaccgtWgtWMtttaHcRtattgScctatttKtaaccaag -tYagaHgtWcHaatgccKNRtttagtMYSgaDatctgtgaWDtccMNcgHgcaaacNDaa -aRaStDWtcaaaaHKtaNBctagBtgtattaactaattttVctagaatggcWSatMaccc -ttHttaSgSgtgMRcatRVKtatctgaaaccDNatYgaaVHNgatMgHRtacttaaaRta -tStRtDtatDttYatattHggaBcttHgcgattgaKcKtttcRataMtcgaVttWacatN -catacctRataDDatVaWNcggttgaHtgtMacVtttaBHtgagVttMaataattatgtt -cttagtttgtgcDtSatttgBtcaacHattaaBagVWcgcaSYttMgcttacYKtVtatc -aYaKctgBatgcgggcYcaaaaacgNtctagKBtattatctttKtaVttatagtaYtRag -NtaYataaVtgaatatcHgcaaRataHtacacatgtaNtgtcgYatWMatttgaactacR -ctaWtWtatacaatctBatatgYtaagtatgtgtatSttactVatcttYtaBcKgRaSgg -RaaaaatgcagtaaaWgtaRgcgataatcBaataccgtatttttccatcNHtatWYgatH -SaaaDHttgctgtccHtggggcctaataatttttctatattYWtcattBtgBRcVttaVM -RSgctaatMagtYtttaaaaatBRtcBttcaaVtaacagctccSaaSttKNtHtKYcagc -agaaaccccRtttttaaDcDtaStatccaagcgctHtatcttaDRYgatDHtWcaaaBcW -gKWHttHataagHacgMNKttMKHccaYcatMVaacgttaKgYcaVaaBtacgcaacttt -MctaaHaatgtBatgagaSatgtatgSRgHgWaVWgataaatatttccKagVgataattW -aHNcYggaaatgctHtKtaDtctaaagtMaatVDVactWtSaaWaaMtaHtaSKtcBRaN -cttStggtBttacNagcatagRgtKtgcgaacaacBcgKaatgataagatgaaaattgta -ctgcgggtccHHWHaaNacaBttNKtKtcaaBatatgctaHNgtKcDWgtttatNgVDHg -accaacWctKaaggHttgaRgYaatHcaBacaatgagcaaattactgtaVaaYaDtagat -tgagNKggtggtgKtWKaatacagDRtatRaMRtgattDggtcaaYRtatttNtagaDtc -acaaSDctDtataatcgtactaHttatacaatYaacaaHttHatHtgcgatRRttNgcat -SVtacWWgaaggagtatVMaVaaattScDDKNcaYBYaDatHgtctatBagcaacaagaa -tgagaaRcataaKNaRtBDatcaaacgcattttttaaBtcSgtacaRggatgtMNaattg -gatatWtgagtattaaaVctgcaYMtatgatttttYgaHtgtcttaagWBttHttgtctt -attDtcgtatWtataataSgctaHagcDVcNtaatcaagtaBDaWaDgtttagYctaNcc -DtaKtaHcttaataacccaRKtacaVaatNgcWRaMgaattatgaBaaagattVYaHMDc -aDHtcRcgYtcttaaaWaaaVKgatacRtttRRKYgaatacaWVacVcRtatMacaBtac -tggMataaattttHggNagSctacHgtBagcgtcgtgattNtttgatSaaggMttctttc -ttNtYNagBtaaacaaatttMgaccttacataattgYtcgacBtVMctgStgMDtagtaR -ctHtatgttcatatVRNWataDKatWcgaaaaagttaaaagcacgHNacgtaatctttMR -tgacttttDacctataaacgaaatatgattagaactccSYtaBctttaataacWgaaaYa -tagatgWttcatKtNgatttttcaagHtaYgaaRaDaagtaggagcttatVtagtctttc -attaaaatcgKtattaRttacagVaDatgcatVgattgggtctttHVtagKaaRBtaHta -aggccccaaaaKatggtttaMWgtBtaaacttcactttKHtcgatctccctaYaBacMgt -cttBaBaNgcgaaacaatctagtHccHtKttcRtRVttccVctttcatacYagMVtMcag -aMaaacaataBctgYtaatRaaagattaaccatVRatHtaRagcgcaBcgDttStttttc -VtttaDtKgcaaWaaaaatSccMcVatgtKgtaKgcgatatgtagtSaaaDttatacaaa -catYaRRcVRHctKtcgacKttaaVctaDaatgttMggRcWaacttttHaDaKaDaBctg -taggcgtttaHBccatccattcNHtDaYtaataMttacggctNVaacDattgatatttta -cVttSaattacaaRtataNDgacVtgaacataVRttttaDtcaaacataYDBtttaatBa -DtttYDaDaMccMttNBttatatgagaaMgaNtattHccNataattcaHagtgaaggDga -tgtatatatgYatgaStcataaBStWacgtcccataRMaaDattggttaaattcMKtctM -acaBSactcggaatDDgatDgcWctaacaccgggaVcacWKVacggtaNatatacctMta -tgatagtgcaKagggVaDtgtaacttggagtcKatatcgMcttRaMagcattaBRaStct -YSggaHYtacaactMBaagDcaBDRaaacMYacaHaattagcattaaaHgcgctaaggSc -cKtgaaKtNaBtatDDcKBSaVtgatVYaagVtctSgMctacgttaacWaaattctSgtD -actaaStaaattgcagBBRVctaatatacctNttMcRggctttMttagacRaHcaBaacV -KgaataHttttMgYgattcYaNRgttMgcVaaacaVVcDHaatttgKtMYgtatBtVVct -WgVtatHtacaaHttcacgatagcagtaaNattBatatatttcVgaDagcggttMaagtc -ScHagaaatgcYNggcgtttttMtStggtRatctacttaaatVVtBacttHNttttaRca -aatcacagHgagagtMgatcSWaNRacagDtatactaaDKaSRtgattctccatSaaRtt -aaYctacacNtaRtaactggatgaccYtacactttaattaattgattYgttcagDtNKtt -agDttaaaaaaaBtttaaNaYWKMBaaaacVcBMtatWtgBatatgaacVtattMtYatM -NYDKNcKgDttDaVtaaaatgggatttctgtaaatWtctcWgtVVagtcgRgacttcccc -taDcacagcRcagagtgtWSatgtacatgttaaSttgtaaHcgatgggMagtgaacttat -RtttaVcaccaWaMgtactaatSSaHtcMgaaYtatcgaaggYgggcgtgaNDtgttMNg 
-aNDMtaattcgVttttaacatgVatgtWVMatatcaKgaaattcaBcctccWcttgaaWH -tWgHtcgNWgaRgctcBgSgaattgcaaHtgattgtgNagtDttHHgBttaaWcaaWagc -aSaHHtaaaVctRaaMagtaDaatHtDMtcVaWMtagSagcttHSattaacaaagtRacM -tRtctgttagcMtcaBatVKtKtKacgagaSNatSactgtatatcBctgagVtYactgta -aattaaaggcYgDHgtaacatSRDatMMccHatKgttaacgactKtgKagtcttcaaHRV -tccttKgtSataatttacaactggatDNgaacttcaRtVaagDcaWatcBctctHYatHa -DaaatttagYatSatccaWtttagaaatVaacBatHcatcgtacaatatcgcNYRcaata -YaRaYtgattVttgaatgaVaactcRcaNStgtgtattMtgaggtNttBaDRcgaaaagc -tNgBcWaWgtSaDcVtgVaatMKBtttcgtttctaaHctaaagYactgMtatBDtcStga -ccgtSDattYaataHctgggaYYttcggttaWaatctggtRagWMaDagtaacBccacta -cgHWMKaatgatWatcctgHcaBaSctVtcMtgtDttacctaVgatYcWaDRaaaaRtag -atcgaMagtggaRaWctctgMgcWttaagKBRtaaDaaWtctgtaagYMttactaHtaat -cttcataacggcacBtSgcgttNHtgtHccatgttttaaagtatcgaKtMttVcataYBB -aKtaMVaVgtattNDSataHcagtWMtaggtaSaaKgttgBtVtttgttatcatKcgHac -acRtctHatNVagSBgatgHtgaRaSgttRcctaacaaattDNttgacctaaYtBgaaaa -tagttattactcttttgatgtNNtVtgtatMgtcttRttcatttgatgacacttcHSaaa -ccaWWDtWagtaRDDVNacVaRatgttBccttaatHtgtaaacStcVNtcacaSRttcYa -gacagaMMttttgMcNttBcgWBtactgVtaRttctccaaYHBtaaagaBattaYacgat -ttacatctgtaaMKaRYtttttactaaVatWgctBtttDVttctggcDaHaggDaagtcg -aWcaagtagtWttHtgKtVataStccaMcWcaagataagatcactctHatgtcYgaKcat -cagatactaagNSStHcctRRNtattgtccttagttagMVgtatagactaactctVcaat -MctgtttgtgttgccttatWgtaBVtttctggMcaaKgDWtcgtaaYStgSactatttHg -atctgKagtagBtVacRaagRtMctatgggcaaaKaaaatacttcHctaRtgtDcttDat -taggaaatttcYHaRaaBttaatggcacKtgctHVcaDcaaaVDaaaVcgMttgtNagcg -taDWgtcgttaatDgKgagcSatatcSHtagtagttggtgtHaWtaHKtatagctgtVga -ttaBVaatgaataagtaatVatSttaHctttKtttgtagttaccttaatcgtagtcctgB -cgactatttVcMacHaaaggaatgDatggKtaHtgStatattaaSagctWcctccRtata -BaDYcgttgcNaagaggatRaaaYtaWgNtSMcaatttactaacatttaaWttHtatBat -tgtcgacaatNgattgcNgtMaaaKaBDattHacttggtRtttaYaacgVactBtaBaKt -gBttatgVttgtVttcaatcWcNctDBaaBgaDHacBttattNtgtDtatttVSaaacag -gatgcRatSgtaSaNtgBatagttcHBgcBBaaattaHgtDattatDaKaatBaaYaaMa -ataaataKtttYtagtBgMatNcatgtttgaNagtgttgtgKaNaSagtttgaSMaYBca -aaacDStagttVacaaaaactaaWttBaagtctgtgcgtMgtaattctcctacctcaNtt -taaccaaaaVtBcacataacaccccBcWMtatVtggaatgaWtcaaWaaaaaaaaWtDta -atatRcctDWtcctaccMtVVatKttaWaaKaaatataaagScHBagaggBaSMtaWaVt -atattactSaaaKNaactatNatccttgaYctattcaaaVgatttYHcRagattttaSat -aggttattcVtaaagaKgtattattKtRttNcggcRgtgtgtWYtaacHgKatKgatYta -cYagDtWcHBDctctgRaYKaYagcactKcacSaRtBttttBHKcMtNtcBatttatttt -tgSatVgaaagaWtcDtagDatatgMacaacRgatatatgtttgtKtNRaatatNatgYc -aHtgHataacKtgagtagtaacYttaNccaaatHcacaacaVDtagtaYtccagcattNt -acKtBtactaaagaBatVtKaaHBctgStgtBgtatgaSNtgDataaccctgtagcaBgt -gatcttaDataStgaMaccaSBBgWagtacKcgattgaDgNNaaaacacagtSatBacKD -gcgtataBKcatacactaSaatYtYcDaactHttcatRtttaatcaattataRtttgtaa -gMcgNttcatcBtYBagtNWNMtSHcattcRctttttRWgaKacKttgggagBcgttcgc -MaWHtaatactgtctctatttataVgtttaBScttttaBMaNaatMacactYtBMggtHa -cMagtaRtctgcatttaHtcaaaatttgagKtgNtactBacaHtcgtatttctMaSRagc -agttaatgtNtaaattgagagWcKtaNttagVtacgatttgaatttcgRtgtWcVatcgt -taaDVctgtttBWgaccagaaagtcSgtVtatagaBccttttcctaaattgHtatcggRa -ttttcaaggcYSKaagWaWtRactaaaacccBatMtttBaatYtaagaactSttcgaaSc -aatagtattgaccaagtgttttctaacatgtttNVaatcaaagagaaaNattaaRtttta -VaaaccgcaggNMtatattVctcaagaggaacgBgtttaacaagttcKcYaatatactaa -ccBaaaSggttcNtattctagttRtBacgScVctcaatttaatYtaaaaaaatgSaatga -tagaMBRatgRcMcgttgaWHtcaVYgaatYtaatctttYttatRaWtctgBtDcgatNa -tcKaBaDgatgtaNatWKctccgatattaacattNaaacDatgBgttctgtDtaaaMggt -gaBaSHataacgccSctaBtttaRBtcNHcDatcDcctagagtcRtaBgWttDRVHagat -tYatgtatcWtaHtttYcattWtaaagtctNgtStggRNcgcggagSSaaagaaaatYcH -DtcgctttaatgYcKBVSgtattRaYBaDaaatBgtatgaHtaaRaRgcaSWNtagatHa 
-acttNctBtcaccatctMcatattccaSatttgcgaDagDgtatYtaaaVDtaagtttWV -aagtagYatRttaagDcNgacKBcScagHtattatcDaDactaaaaaYgHttBcgaDttg -gataaaKSRcBMaBcgaBSttcWtgNBatRaccgattcatttataacggHVtaattcaca -agagVttaaRaatVVRKcgWtVgacctgDgYaaHaWtctttcacMagggatVgactagMa -aataKaaNWagKatagNaaWtaaaatttgaattttatttgctaaVgaHatBatcaaBWcB -gttcMatcgBaaNgttcgSNaggSaRtttgHtRtattaNttcDcatSaVttttcgaaaaa -ttgHatctaRaggSaNatMDaaatDcacgattttagaHgHaWtYgattaatHNSttatMS -gggNtcKtYatRggtttgtMWVtttaYtagcagBagHaYagttatatggtBacYcattaR -SataBatMtttaaatctHcaaaSaaaagttNSaaWcWRccRtKaagtBWtcaaattSttM -tattggaaaccttaacgttBtWatttatatWcDaatagattcctScacctaagggRaaYt -aNaatgVtBcttaaBaacaMVaaattatStYgRcctgtactatcMcVKatttcgSgatRH -MaaaHtagtaaHtVgcaaataatatcgKKtgccaatBNgaaWcVttgagttaKatagttc -aggKDatDtattgaKaVcaKtaataDataataHSaHcattagttaatRVYcNaHtaRcaa -ggtNHcgtcaaccaBaaagYtHWaaaRcKgaYaaDttgcWYtataRgaatatgtYtgcKt -aNttWacatYHctRaDtYtattcBttttatcSataYaYgttWaRagcacHMgtttHtYtt -YaatcggtatStttcgtRSattaaDaKMaatatactaNBaWgctacacYtgaYVgtgHta -aaRaaRgHtagtWattataaaSDaaWtgMattatcgaaaagtaYRSaWtSgNtBgagcRY -aMDtactaacttaWgtatctagacaagNtattHggataatYttYatcataDcgHgttBtt -ctttVttgccgaaWtaaaacgKgtatctaaaaaNtccDtaDatBMaMggaatNKtatBaa -atVtccRaHtaSacataHattgtttKVYattcataVaattWtcgtgMttcttKtgtctaa -cVtatctatatBRataactcgKatStatattcatHHRttKtccaacgtgggtgRgtgaMt -attattggctatcgtgacMtRcBDtcttgtactaatRHttttaagatcgVMDStattatY -BtttDttgtBtNttgRcMtYtgBacHaWaBaatDKctaagtgaaactaatgRaaKgatcc -aagNaaaatattaggWNtaagtatacttttKcgtcggSYtcttgRctataYcttatataa -agtatattaatttataVaacacaDHatctatttttKYVatHRactttaBHccaWagtact -BtcacgaVgcgttRtttttttSVgtSagtBaaattctgaHgactcttgMcattttagVta -agaattHctHtcaDaaNtaacRggWatagttcgtSttgaDatcNgNagctagDgatcNtt -KgttgtaDtctttRaaYStRatDtgMggactSttaDtagSaVtBDttgtDgccatcacaM -attaaaMtNacaVcgSWcVaaDatcaHaatgaattaMtatccVtctBtaattgtWattat -BRcWcaatgNNtactWYtDaKttaaatcactcagtRaaRgatggtKgcgccaaHgaggat -StattYcaNMtcaBttacttatgagDaNtaMgaaWtgtttcttctaHtMNgttatctaWW -atMtBtaaatagDVatgtBYtatcggcttaagacMRtaHScgatatYgRDtcattatSDa -HggaaataNgaWSRRaaaBaatagBattaDctttgHWNttacaataaaaaaatacggttt -gHgVtaHtWMttNtBtctagtMcgKMgHgYtataHaNagWtcaacYattaataYRgtaWK -gaBctataaccgatttaHaNBRaRaMtccggtNgacMtctcatttgcaattcWgMactta -caaDaaNtactWatVtttagccttMaatcagVaagtctVaaDaBtattaattaYtNaYtg -gattaKtaKctYaMtattYgatattataatKtVgDcttatatNBtcgttgtStttttMag -aggttaHYSttcKgtcKtDNtataagttataagSgttatDtRttattgttttSNggRtca -aKMNatgaatattgtBWtaMacctgggYgaSgaagYataagattacgagaatBtggtRcV -HtgYggaDgaYaKagWagctatagacgaaHgtWaNgacttHRatVaWacKYtgRVNgVcS -gRWctacatcKSactctgWYtBggtataagcttNRttVtgRcaWaaatDMatYattaact -ttcgaagRatSctgccttgcRKaccHtttSNVagtagHagBagttagaccaRtataBcca -taatSHatRtcHagacBWatagcaMtacaRtgtgaaBatctKRtScttccaNaatcNgta -atatWtcaMgactctBtWtaaNactHaaaaRctcgcatggctMcaaNtcagaaaaacaca -gtggggWttRttagtaagaVctVMtcgaatcttcMaaaHcaHBttcgattatgtcaDagc -YRtBtYcgacMgtDcagcgaNgttaataatagcagKYYtcgtaBtYctMaRtaRtDagaa -aacacatgYaBttgattattcgaaNttBctSataaMataWRgaHtttccgtDgaYtatgg -tDgHKgMtatttVtMtVagttaRatMattRagataaccctKctMtSttgaHagtcStcta -tttccSagatgttccacgaggYNttHRacgattcDatatDcataaaatBBttatcgaHtN -HaaatatDNaggctgaNcaaggagttBttMgRagVatBcRtaWgatgBtSgaKtcgHttt -gaatcaaDaHttcSBgHcagtVaaSttDcagccgttNBtgttHagYtattctttRWaaVt -SttcatatKaaRaaaNacaVtVctMtSDtDtRHRcgtaatgctcttaaatSacacaatcg -HattcaWcttaaaatHaaatcNctWttaNMcMtaKctVtcctaagYgatgatcYaaaRac -tctaRDaYagtaacgtDgaggaaatctcaaacatcaScttcKttNtaccatNtaNataca -tttHaaDHgcaDatMWaaBttcRggctMaagctVYcacgatcaDttatYtaatcKatWat -caatVYtNagatttgattgaYttttYgacttVtcKaRagaaaHVgDtaMatKYagagttN -atWttaccNtYtcDWgSatgaRgtMatgKtcgacaagWtacttaagtcgKtgatccttNc 
-ttatagMatHVggtagcgHctatagccctYttggtaattKNaacgaaYatatVctaataM -aaaYtgVtcKaYtaataacagaatHcacVagatYWHttagaaSMaatWtYtgtaaagNaa -acaVgaWtcacNWgataNttcaSagctMDaRttgNactaccgataMaaatgtttattDtc -aagacgctDHYYatggttcaagccNctccttcMctttagacBtaaWtaWVHggaaaaNat -ttaDtDtgctaaHHtMtatNtMtagtcatttgcaaaRatacagRHtatDNtgtDgaatVg -tVNtcaaatYBMaaaagcaKgtgatgatMgWWMaHttttMgMagatDtataaattaacca -actMtacataaattgRataatacgBtKtaataattRgtatDagDtcRDacctatRcagag -cSHatNtcaScNtttggacNtaaggaccgtgKNttgttNcttgaaRgYgRtNtcagttBc -ttttcHtKtgcttYaaNgYagtaaatgaatggWaMattBHtatctatSgtcYtgcHtaat -tHgaaMtHcagaaSatggtatgccaHBtYtcNattWtgtNgctttaggtttgtWatNtgH -tgcDttactttttttgcNtactKtWRaVcttcatagtgSNKaNccgaataaBttataata -YtSagctttaaatSttggctaaKSaatRccgWHgagDttaaatcatgagMtcgagtVtaD -ggaBtatttgDacataaacgtagYRagBWtgDStKDgatgaagttcattatttaKWcata -aatWRgatataRgttRacaaNKttNtKagaaYaStaactScattattaacgatttaaatg -DtaattagatHgaYataaactatggggatVHtgccgtNgatNYcaStRtagaccacWcaM -tatRagHgVactYtWHtcttcatgatWgagaKggagtatgaWtDtVtNaNtcgYYgtaaa -ctttaDtBactagtaDctatagtaatatttatatataacgHaaaRagKattSagttYtSt -atatatagtcttaaaaMtcatgttcaaDactgRttctaagagDtatttttagcgacttgt -gRtgNctgSgRaaaaatgcaMtYtDcatcaaYKttHcatSWgaaaatDataggttatgBD -MtgttataacaaYSgagttacgttatgtDStttaaatctcgWKtcSacgagagaSgttat -BMDgtcggtgtgcgaNtaSHBatBtttVMgVcagaNatcaDDaKMtMYtatagaBccctc -tDtgtatttatatKNtgggtatgtRaacttgaWaaYgcaHatccctggtttStatMtcgc -MtaaaWKttMVtWctVtgttaKDWctgWaVttaDVatgKtagagtcatctaKWgtaaMtt -SacBaMattaKaaHDataattgWtgttttgtcatBacacgtStacaaagtNctNtgtgat -cHtWttcKaagagttttaaaaWacgRacatctNatVStgaatDHgttWcgtRKcatatat -ctcaNttaaBDcctgaaaaaDtaYaHaKttNtaYVaVtttaDtctacttctWttaactaa -ttttMagWcaatcccNKYtBaacatgttgaKgKcgcBHaatDMttatatcSWacatDatR -cWaMtDgatBctHgScttaaaHtSgKtDtttattgtRStWgttccatatttcacWttcat -attgtaHVgaBtacaMtgMaaagDaataactDatattagMaNBagcttcattcgtaaKtg -tatttcacMtgBaVtaattStcttagtYgtgtcgccttKatgggtgaWaataggaatacM -MagaSKRttBgatgacRtgMtagaSRataggtatcaccgaNaaaWSWacDgatacttgat -tagcttgtgVMttatYctaRgHVcDtVRRtSaMtcaVtVtatcaYaHatattaaVaatct -aBtgtacRatNtatttgaYatSaHctaNgNtYtYaYagattVgatcRtaacgYggtgtat -KttaatMagatgRtatatgHaKccHaaaaYtgaacgaWaNgtYHgacagaYtctaVtacc -cgatttttaaagcDttatNRgattKaaattttcatctaatgccgcaataataattgttat -YtagtRNtaagttggtHaKttWMtDKgatSagBYcgRggtWaVaattHtatgtaaaMgSa -aagataaKaaKgttDttttRaagaacaWRcaacDgtgttaatattaKtatcaWacacatt -tVtctgatHRcagtttNcaaatcNctNttttataactWacBBttgBttaaaRaWtBKaaa -cgtatcRcaMaatgYacaaaagtgBataStWYtggtatgacaKWtctSgcKHgtcNaMNc -ataSatattgactacMcataattNVtDaRccaaatcagttttYttagYaacgtaatMtMV -atNgKaaMaaBgattaKttatDaBcttKtccttttacDagaYtacHgttggacaaaVaat -agtYatcataSgatcaaWVttcgaatgaccctccttNtaSBWaatttDttttcaatatYg -gctatDcttatNctttagDcMttcaacWaaNattSYgctttcaHcRaattaataaaatcV -ccRaattactctaMaVRattacagtgRcDtcgtgctcttNtWVtacagtHtatHaBDtcW -ggtgctcaaRHtatgtDgacStgcaaaVKtagttataatactaatatgtagScaatRSac -aattgtattgcagatHHtgBcaatKKtaaMMcaRcgactatKBaMaYatgKatttDaaNt -RatattgtatWttagcaaaaacaWgcacaaHcataYtDaHgttataaSacgcagggggtY -atgcKctaaaHgcVgctBDaVttccStagNgcSgtatgVYaMatcaWRBtVtgYttgtgR -cYttcgctgaacNttgtgtctattWttttcctagMtagaWtaKgatStScatMaBtaSta -SactattYNatctgtacRatYDaatgatgatatgaatYaaaaSHttaaYMaWtDcaNHaB -caYtgVgcatVaacattMRatBtaatttaDacRtagtaaaNYVSMtcagaaDtttDHtRc -YatacSNKaaMcHgatBaaVttactggBYgaYatttttgcDacHctWatcgtagagtact -cattDggtcatKaSgctttatttagtDtRBacttaWYaaaattttgaccttaaWtaatgc -RgccacttMtaggKtcBtgacgaHctttatcgtcStatMHDNagattatNagVaaaWcgg -aaaYcaVactDYactaStattgBHtcYctgggtacatataaYcgaYagaggaggacaVat -acHRtYtctgtaVgaYcNgaaaNatacVgcNgtaatttDcatttttcaacttSNcaaDat -VYctSgcaccttagMgacgcttgaSttaaaatagttaggRHttaaacMatagcaWgMgag 
-tcgctagtgtKgactaaHttattaWgcaaaaaaSatatgcgttaBNggttaYVatgaact -ttttgccatataaataRatSaBctagttataBccgaaacaagatacttaattttgaHgHM -gtaaKctttaYtaaRacBMtBaYgaBaaacaYtVtagcRgWatHaWagattWSacStMHa -tttaDagacaatcgtgtKtttggaMtgtWtgtgcaaNaaaaWtKaaBcMWtcttctatga -cVgagcgaggHaYYtttWgSaaYYaWtRYHHaMDtctttacaatggaaMctataagcttB -cgHcNWaatttgtatatYtStatctagcactgtVttccagaaattaDtttaRtVataBtt -WagcatDMVactYtgcatWtttgaaMggKaatgaaaaHtataDtgYcMggVaaatSMHtt -tgVttaYaWaataRttgttaYttattttRtWtataaBgtDtttatatcVgaaBcaDtatg -tcaDagaWtgaYtWctcVagctcagctatatagcRVtcaKtaataatHgNaccgaaaatV -HBaatattcgttaVYttatttctBYaatKaagaccVStttcattgaMagSaaaaccccWK -caaNtMYacctaDStagaaatttatcatVgtcaatacccKattgtaaagtggWgtatatV -tagBcttDaBacaattWtDYKtatRKggStRtaaaWatBtaagtaattDaaaaBRacWta -agtacaSttaaatccgctaaccKaattgVWttDattatttattKaMtcYtMRWagMtcgK -gBagacgggVaaNaaatgctKcgtaataaKtaaagtccWcttHMatSYgataaatDttBa -HccattgBttSgaaHYtaataaaMtgaagatgtttBgRcattaRaDHcttBgaMaWaaVM -MattaatttgtgBRctattgKMagNcMtatttaaaWttgaaacatWgcScgYYDYgttYt -VtattgcKcWtagcggtgBaSctaKatacaaVtcaRDccccgtgttBgKgggtHagcgaa -ttaaagMMttScggtDttttaHcSaagaacactcacactBcVgaKNaDHacacttatSag -aattSKHtcagtataaatKaaHtgaaRagaaVcBtaHtaaatcgatcWcaRtaaaattta -WttaagtcaggRctgaWcttDttgactttaVSaaaatggtaWDaRMtBtaaaaaKatBga -tMtctatatcaVaMgatttgNagtDRttDatcttttaMtYaaatcggagttctctaYatN -tagaNcgMMactacHcaagtaaaatStaSaacaHcacSgggtNKatggaaagcggaaKgg -gtaYtacSgccgBaggcRacgtVgDtggaMcYaaaMatggacgYStKKatgaBcaaRtSt -ccSagcRccgccgcSDtgcggBDgaDtBtSSggacMttttaWcatcMatgtNMBWgataa -tcaaVtgaataataaNatgcaaNttNctgacDMcaHccgatgKgWVttccaStggattct -cDacttttttctttaaNcWaMWccWKWttgaaaMctDaaBactRtVattttBtcMaNttW -cKacagttKSttaYaWSactHSaBtHgatgttacatgcatatMtttgtaacScWHBatHa -ctggatatatctgagMgRSatctaaSttaVagcaRcttggaYaatKHtagBBactattcg -taaagaagttgtVcgatgaVatHMtcaggtcgKSgWattgaaaVctccVgtDcaaatgaa -HgMYactcaMatatatattNVttWtWaatttacRagKataaaNtttacaaWgMVactatt -aSgaggVaaagVtaccDRHaaataRaHaRgcattMttcaatcaKaaataDcaDKtctcga -ggBggacctDtttatHacWVaWgatDctaNaNcgKatcMtcMaatBtttggacgtgataa -tagaaacRactcBtattttaKtgSaaggKtaggRaVtatagcccaNRttaccttSMaaga -tcggDacNBatWcgaactacactaactNBtaStgVtNagcatctaVtaKatKgaBtcgtt -tWaagWMgagRaNatHaaaaDtacagacaBagtgcaHaNatctcBccNttaagttDgaat -aaNtcgctaacRBgtaatSttaatatgcataacccaSattKcccttDttggtcaatgggt -tWaacgatacattBtgMaYgaRttatgatKaKgtattDtKWgataacgNBtaccgaKWat -cttcttKtgtcttagcattcctWcaaHgagtatDMSgKtcagcttgVHaKcttDaataaa -VaatttDgtgaaataaRgtcaVaatacttagtVatatgggcatgtDDtMtgtatBggatt -HtgcVtgtgatcaaSattatKYVaacSNNttNWcgaHttKDaaMYHatcgttaattaStt -gctWaacHtaKBtaaaaKHttcRWgaaWcRtBtttggBcDtgtacNttaagcKtaHgtag -aaaaRttgaaacatagtWRaacYggtaaatcgctYaBtWDRtgttgSctaaKatNcattg -tgtMttatccatatagctSacgccSNaaactacgNtgtgcttMatSKtcaaBaNaaacat -aacagaaatagtagctcNcatcVgaagStaataVcDKKttcagDHDtattctaatgaggg -RgBMctatacaagYactctMaaagtcgctttctcgtgaattatNcgatMtttaggcBaaa -tctNtactaaRKtgKactattgtcatatgtacgagttMaaHSSgHgBatatcgcaSaata -aaWgaagtatagaHgcttctttatgaccWaatttaRtaDaatttaatcgaaattgattMc -atcaWaMtaWaKactttctBacactatNgtccttaWgtctgaccKatStaKtgagtacgg -gcgcgtYNtatttagacctctKcatgatKWStcaataactaWgMSgHtgatctttttgtc -gacgtSacttaYgcctWctcctctacaagVtttMaBactWVaccaYtgtSgcgttattcK -tatStgaaKaccgNaataaHtatWtYtRacggcaDaScagcagHaYWRtRNcDtHtcVWt -ggaataaaYttgVaNtgttagtYttgtagSaaatDgaggccDcgBRYStattatttaagg -ccgHgggYRaaccMaagttatSttctttagcMtgcgMtgaSagaNaDagttSatgattWa -tttagtDgcttgagtgMKaYWaYccagcaHatKctaKaDgctagacttattgattaaYtt -atcttattattStaattWaRaYBWagYaatatgttRgScttgBagDaWgcgtgcVDaggc -ttgtctaDRKacttgcaKBWRtaaVaSctKtacttMaaSVaWWcgSaNtttSWgtcggtc -acttggVVtgagaataaataaDttgaaccaaaaMttaaaagaaaaaaaatcNBtatMgcc 
-WagcaNgaVaNaaaaaaYaMgttaWtatHaagtNtacgacaBtMMattttWNaRtaaata -gYaScKattacagctVKBtWNSKgYtYgtWatHaVatDaaatWgDatcctggSRagagta -aaaMgatttRtaHacatggtaKagVcctgatgaMtaaYgatgtattattttHggBaccaD -ctctggNNtYaatctVttgVtRtVcRacttNctttataggHSRtaRacaaattaacHaHg -tgttgtttcBtBtatWtgtattttgcKagMcaaagaMtattagtStagcBacYaaHcagV -gWtgtttcgtgDHaVtagDatcRaRtggtWtaactgcacgaggaaaRttSDaaVaSttaa -aaacSMttactaNtcaacaattDtacttttYatVSacYtWtMttaattatcKtcttctat -caKDtctStSaaacggtYccatgtgagagtWtagWKgcaBaaaaKttgNactaatcgagg -cWtcDDaaaaaacactHattaattcactatYttaagacactaKaagRtRataaattttca -tHggtaataaatgataHtggctaacBacDgtaatattRtYgtDNDBgKtcaggcHatttt -gHNgWtaatttccgactactgacatVNttYYgactcgctctatttagaMcgggatHcgtt -tatBaDSagBaaaagRttBggttaaBactVHgatgaatttattcaaaattgcacttcDga -cttYcVttactVtttatBaKHagaWgtgaatggBtaaSggcagacNcttaDttVgMtWag -attggVatttacHtctNcMatacttSatMagcttgtNcYaaScaYactcKctKtagScSt -cagtttcatWaatggtgagaggHaggggcaacgcRKtaRcMaNtHaatRaRaaactVtBt -gttaatRtWWcaaagKttccaaKaaatacgVttcacaaacgcggtgagaRaatggtgDMW -atcWVScacaaaDaggaaHtgttSMaaaaaccYccDBtatYgtMagcSagaccaVcctcg -gtVWaaagttatcNaagataataSaataaaKccgtaDtYttatYcttHttaagKcMctaa -atggaatRgaaaVaaVtcKYaggatWcaBtDaggDatccttcYNtgcSMRgaRtNgaatc -gttRttatDVMtagctttacatDVtatatatcagctaDagMtataccYgaggYaaatgDa -aaatSgctctgatgtttVaaBcctgataKtagaaaccaKatatgttaDtgaDtatagata -atacagtaDtatcNtgtDMtYcattRVtctataNtWttggNaSgtMgaaYctctDggHtg -gHDccaccacKKaaacaaaatRatttccctttaagcRattMHctattHaRtataVattgg -atcSttaaHaHgaaHNDtacattSaaggDatttcaaaYgctBcatattaaaKagtgccca -tSctcgatRtaaaMtgWactttNMaWctYgRatDggaactcDcaattaKaactgagtatc -tataagYaaaSRctggtacWtttccWtaYRtKHattatagWtKttaNgcDtatHacccat -taatttataacgctMgaagtaacaacagMgtaYHYVtKMHtacMgKcaaatctgRYataN -tcgttcaatacggWtMcaatYcBWaagYtVaDNagtatagDaaNtaaaYtttcYWttttS -tgggataaMgatattagaaYtNctcttcBagactaYDcgtacHDWccKaHgttcttHgVg -gVDttatcatKaMttttacWaaSattctatagaHaggKaDagBtaaagtcYccattgtYc -atctaNgRgVtgaagtDKttatBKcggDtattRYgHccgtgcgBNMtttVRgacaYctSc -taRacgtagagccgtacRaagtaHKagStSttttgYSatattaaaWHaaWagttDKaaNa -NHaaHttaYcttMtcaaatgKttBtSgtccaaVaattSaacgttgNattgatatNctaWt -VcagtactKcWacgVagggHaaRgaDaatcMttattaataacaBMaaVtgYtKgRgHact -gtactatcBaMtVggtagKcYtHtBSaattagtaatgMcaVVagYYgWtactttccaaSt -tDgaaMaMttcacttYtRgacttcagcttWtttagtgataMaattaagVtagaatatKat -aagtagttaagHMRaDattaHaaVcctDtagtcVYcaataaYcNttNaaaHctcaRaatt -tcaNRgatSHgVatagctRtcatgaBttMaaagRtcgHVtgRgStgatttgtagaKagaR -WRctgNaHYgaaatBctgtttRttNWagaccgagKgtgcggHKVttaatattaatataat -aDtaNcctacaaRgcaNMctctgaaSHWWHcttagtNagtWgWaaKtYaNgcBattatcc -aaaSctRRHKaNtKcBgtgagaDRWBttactaaattSMctatatagaaYacDgatttccV -taagRtgRataatatagtctttttatgtMgtcaacaaNtaaaaactctWtagaVaaaDta -attatagtBStcgaatDtgattVaatMtcaDattVKWaagatagggttgtMRSgtcYgWM -aatgNtagtcBttagtttctctWaaMtVgctWgSgtHagaSagactagKtagWggcattt -HgttgacaaactcggggHggcWBgVgtatgggagVgagtcVcBtDctttagtctaagVWt -HtgtttaScatacMBtKgattatRtgtttgtctttDggcHaBtRtgtaataNataattta -taWctgaYWataStcHaatcRtaaVagDWaSatagtaccNDgaagtatacgttttacgac -gKRtattgDctatRRattVtStaaactagatgVatttagaMaSaaaattVtatYtgttgt -RMagtHaatttSttaaYNaggWagtgcacgaMcactgHgtgtgggHMgtKacttaaYgtc -gcatcSatattgBaagtttacMtYagSatttatttaVtaaDtaWaHcgNatactgactHt -ggWtataDcDScatactcStcDtgtcgtgtatgaggtHaaNKgDattgcBccaagKgtat -gacKSMtttttgttcaaatcaaYtagtaSatgDaaaMccKNaMaatagaataagcaatta -ttataaMgagtgaSgtctNYttattHaNaYYtcDDtaatNRgtatttaaYtaaatcactH -VaHcStccttcccaaVatcVggatKtatgRaaDBgaYtttacttYggactSDtaBcaaNg -gggtattatattBDcttagagYNMatBgttYaagactMatgttRgatacccgtaacacBH -tatKacWgatRcHttaattYtKtStccaaatVDcaNKHHaaataatagtagtatcttgct -NDggVaVVtaVaRaaagSaccgttctcMtVtgNBgtDtttctYgttactBctcRtStWtW 
-DScMtcWSaRatgaataRHctaNtcStctYtWacagatgtatYBtHaHWBtacggtDcaa -BtatcaggtcaVattaNctactgaaaatWaDgactNWtMtggagaattBaataYcMWYcg -atMYatWtgattSatgaRtDaRgccagtSttatatRaBtattRcWtagtVgaagttMcta -ttatatDttaggtctKtgtgtBagacgttatRKtgatctatttBtataactgataacKcg -gagtgHgtVttcttgtKDgcDtaYatBDatcaatattgttNtaBacatcgcNcaKcaWcR -ataWcVgtacgScaWgttcggHcMttcRccatgaRStYgNacagatacYacWWtggNaDc -WagttHatMaNaatNtcDMDcMaKgHNatScVgatKWatatgNRgtccgYgaagattDHg -tMtcHaSNaaattBatRagtaaatttacaagHWtKatcaagtccHtYcctgttKDMSgta -ctactVctgacaaaaHgatatacataatKtStHgctScSatNatacaYttaaWHtctgaa -tYtagtHtKaggccWBaStaDctaagagNtaatcaatcgttNgaYDaagtaaaaHataga -atcgcgBaYaBgaacSaaWaaaaactccgcMttHttYgtaagaMctKBtacSagattcBa -aWtaattttacRttatcgaRtacaRHgtgRagaaBcttaVgacVDgggaatVatagaact -RRtacgYttNattVHgaHttacaaaaaaaYtcRWtgtgattatgccaSDtttatKWgaat -atSNDgattttaacgtcSRtatggttcttcBtWtttMtBtMScttaHatBattHacYtaY -acattcgttKgtcStSctcKtatatttcaKSgagcttccaacaccRDtttDaccattata -tSgtcWtVaaagttgtagccattDtYaatattDaccatcVDaaRccagttttgtcHacMa -ttcHgaNcatgttKcVttcctgtgcSataaatattgaKtctaWctMRaKggtaYcaagtt -DttcgttacRtatgatggHNaWMtKttcatattaaDaSaBaaaMtMatBgKtttgHtHac -taatcatcgtWaatKaaWcaWtcctVttaaNaggaaaagtaaagaDctNttaDBaBgata -gMgaataacRcYggatcRaaaHaagatRDtVRactaYagttcaccaaWtctcSSaaatcS -KattctggDgaacagDtaDagacagtgtaattcaStYttNaStgtaHgccttaScatMRc -accWtcatttatRtaagatWtNataaWtMNtDVgWttgcWgtgaRttttRgWcttMtcta -HacaaYtKctgaBagtRagacttDatNttaaaDgRtatNcHatcSDgtBatcttacVcYa -cNgaattaacgagttgYgacttDattatacBattMgctagcctagatVcaactNttccta -atgtDaacgYaNatagMatSWtYBaaaRtgMtatSRgaataYaScaVgtaScMagatNNt -ttacaaHBaWtNtRtctaaacDaaaaWMcaNtcVaDNcagaDtgcWKYgagttaHtgcDY -ataaacataBaWWtcggtatgtgaaScaacctttRNatcgttaaagcaDctaatgcBatt -tacaattVaMgSMMtccYaaaBYtggattttcataWttgBtatDtBgactaatgtccWaa -HataaScHttWttDtcgtcaagMctMDtaaaatRtBaaaacaatgtcagcatBgNNBVtt -ttttcBacWtttWtSWWtgaaaaSacgBtaaataaagtcDStaagaactgttaatYatgD -ctattactgaHtaaatStHaagacaKtagDtaaHaDgttccaaDtaaggacactctDggc -gtDagtcWaHgRcHgDgaSctttattgtcttttccttRYaDgNactaaatcaWggcNSBa -gttttatatStKgtcRtgattaaggtcaSBttaacaaKatgggatcaaattgRgcBagtN -tcgDcatttWcctttgtNagDgctgcatttactttgtgtcaBgSatttNHaMcggcagSc -tcKDtWBaagSagWatggYtVatSRgKagattgaVatKttcgatYatKYSgDaacNtcVg -tttaWataWtgVctgcgSggMgatccatgagttgtWcatYWWcctVcNHagtNtgtKttt -gatcaacttaSttattgatNcatWaVgNHcagStVHcggHacaaDttgDttWcaaRaKga -aatKaattagtaWacattgaaatgtgaatgacagtgaRVtaaYagYtcggcatMttgaag -gDgagDRcaKgHtacacaaaMcaBtagHactgKaatRtNttcttcatcatNgYgStggac -tatgSMttgKtDaDgacRRgtWaVattgatttaagYctatatagactaagaggtatWtat -aaactaYaHRctStgKWcgtRtKtYtYtagacgattRaaYBtaStcttaWataatcHtta -taRcactgagtgggagccaattctcDtgDaggHcDRVaVVggaaBtRttaataaRRttgt -aagKNcaVWWgtatacctgatcttBtcttRgaWcaVRKcagttSacttagcgtKtgtYWa -tatcgNttcKaccacacVKctgattBtggacgtctgacaDtWKttattttgMBgKaacaD -ataattWtBtBRtVtacataaatatttgtWtttatagtDtgcctagctHYaatgcaNaaR -caatVtacctgggggKtagBgagaBgRaaNttttMtMagMtgtgattNctcNaKggWtMa -tcttagWgtaatatatNctaYBggKaataBattYtaattataVtggNtcgtgtctaatta -aacctHtacaaactDctDtctgatatgMtgataacWctgtgYSaaNScgDYaWtatDatM -KgcaatttctgNcgtHtaWtagatatcYBttaattactcaaaVattYRWtatttDtaNMY -MttgattataatgcgNggWaatYagttgBagNcaagaaaDtRgtaaaagctgcatctagc -ttaVgtBttatagcKMSaattYtHcMaBttcagtcttgKatgVSVttKgttttttagtgt -DHgNggtcaVtatttaacNtgaatatgctatMcatgaaaBtgBSaWctaataaattatYt -tagtaDtaccggaatgagtaattggatttaacBtctSMgWYtgKgattacgRctctccaa -tgtaggcctgaNaatScgYataaBBacaKtHtttcatgaaHtgBtagaKHVtacctVtca -accaDaaWNHNaatgataattgatgWcagggtcMBtgSgRataHctMctgMHHtKaBtaa -MtMgataaRWtagYtgaaMaSgctYtgcgaaHatDtatgtcWRatKatatYDcBgNtRaR -acattMcagaHgaaagRccgcgWttggSatBagagcHgYtatctVtcatYaaVRtcaSac 
-aMYDcgRtcaaWgaRgataMtaaaacaggtgtaaYcattgWgDHcWgttaVatttgcatc -taatccacaaagaagSatgcgtagRgagtHDgaVcgtgcttatggMttttcatKSctNac -HcctMaKRatttgatctaaatgHaaScataataatgtttgtgtHaVcaaaaNHaaaatcg -ctgSVtattVttagaaNWcacagtgKtatgattHcYcttgDaWVataBatBttttWtaac -tNaattttctttaaYHaMtttaaaccgStcHaVBaatcRacaaWactgtagVKtNRtcct -agcWaatNgctKccttctcDaBDcatYHatatgcaataaBaagaatgDMttaHcaaYYtc -actgttRtgacRaacctaWtBtBMagBctaaBaWtgatgVtttattataggttaattgta -atYcaRtVctcttgcacSaaMaatactRSgcataKcagcaVNKttcgSatcaaactaatt -DtaHtNaVtgttttttaWVtatNccagWttcgtatBcgttVctcBttaaaaMSaDattKR -cctttcataHaattaatWaaataKcaHVaggaatataBYKHVtgVcVgtcHcttccgcct -attDtMMgWaacttgWttYtttcMcgtcctaaVHtgWtggtgacKtcaWaYMttacttag -VWtacgSatatcgWcKaaatHKaaaYttgtagtcaacWtttggtcaagttgaaBBaSHac -VcgYgttWBSRWggtattttaYDtHatattcgatNttacaaaaVacaMccaaYStaataR -ttVtcttagaVKaacaWcgccgtRatcatctaaatccMcctttaMggccHgYcDgaKcta -tgMRYBagcaNDtgMtcRttgtgHaRttacatgaWcDtgctgtataggNggtgaatagBg -agYNtatcagKtHcatBatgVKgaHWagattRDatatcgYcHagRtaatgWtcStagcVa -tNaaaaKttgRaRBYNgtaaDtStaVRgcMccatMWaaattBDatttaatttataaHtag -tVVaDRMKBtaacaatttttttDaRSgaaKDtVaBatcagtaaMttaagcctRgaNVggg -ttcataatagNatcctacactacgcatgtcggaYgtaKcatggattgactttHtaattWN -RaaWYggttcaaaggaaNtaatgcHcaaaattBtagcttattcaagVtatttWgcctaKt -atBttDYcattagDacKVaYNccgYaYRaaMaattRaagaHtatgcttgcRagcgctSaa -tagaaRacaRacSccagcacVMataatHgRtagcgaKgYRaDcVWSDVgRaMgcDgtaat -tttaYttggtaaWcttKDaaYtatMRcgKccYcagtYcBgRccattcaKtgaSSRtactg -acgHtgtaaaaBatWgcaMcBcYcgccagactcttcSatYattgatgaNccaaaaWaKat -VgcaggtWtBcgttaRMagcaaagtgttcacatataaagaHWtKatctacttatatcacY -RaaVagataagtaattttgatgtBctaataggtaRtaaHaattgtaRcStYSYaWRgMta -caHcNSttVNScattNKaaKgBtagtgatYcaaaStactggttggggaBggtNtgtcaaW -BaYVSNgtaataBNtagtatatcacMcScccVcgtVRRtttNcKaSRNaNtHRttattta -ttgacaatggSaBagataaccgttcctaDNaattgctVtatNtHtatagSccaagctKtt -aaacaaattattgtSHgMWgStttNaccattBMYatRtccStNgttgaaBcctVagcaaa -atgatattcRaBccMWaagKtttttcMtgaRYNaataDttgttWRttattggHtNtataa -tggttgtStYgaMcYVtcattaggtaatVcaNggaRtNataMWcctcYgcgagagRgcHM -gcWtgaYtVSttgDaacgaaaatMttYWtWttcctgaKNttatttattRaattaagaccM -KtttcWgtcaBagKSaWaaacaNtaYaDtBNaaagWtHgacaaagtgVtcatKcgcaatV -aactatgcgaaactccNctatatMgactatttatSaaVttNttRttagHtccKtHtaaaN -atttYVctaatttaaaatHWaNtSacgaaaHggaaatcacagVYcctaattcMNtgtYtg -agttatttaBtcRgBHNacBtactctagaacgcKaaDWYYgcattactVagaYtgaVVcg -caNctttBagKRcSgaaatttgtatccattgtggHcaatRtaVtaSaBtcYYcatcgtgt -cHaVttaHattctgtcaBSNYaKBBattaatggctgtHatattgtBacDcBgatttaaaN -tggaaaaYtNcaKagRRtRgttRtMtWgggatcNtacacctgtWKagatataaYVMtaaD -taaacctctgtgtgccttScacWaggaYacttttKacgtttgtgataKYagaYaVatcWc -SattaMcatBYttYaaatgStKagWattKtttaWgtagaaSgtRattcSaDagVaMatta -ttYaagccSgcNaaDgaaSaggtaNgtWactaWcgHctgaNatttttcaatgtaMHSWaR -tggtaNtaHBtttWWaaatattcVtBtctStWtaWMaBcatttcDagttDtttatatgtt -WBtNaYatcccSgtgagcgaRYtBtagaDacBtaagaataWactaaaagKtaKaWaataa -cKcccgDtagccaaagcggaatcgctSRtacKgcactacccHaactMgtgccaBaRaaaB -VtcgSacRKtttStgatcaaHgKtaaKaccHaccacccKttgagcttcSttttKKcgacB -gggtYMaatcBStcgDBtMcataWtaWaMtgaataagaaDatccSYDtgBatgactBaVt -aagatctcNMgtcaWKtgcWggcgatacgtgtttatttWaDaNWBNaaNtNttcaaatag -taatScgHtMWttgttgaBaDtgNatSaagtttHttaNaNKaattKatttgatcgtVcat -gaatatBtttctaacKaNttVttSagccatRtatatcactcHHatctWSKttaMacaaDa -ttccaRaYttttagttaatattcctYaacVactgctMcgagcaMYtttgaagctagtKgN -WttgaaaMatcaMcttcSVatcaatgtNactaaBagatagagtDMgtNtNWatttSaHac -tagaaaDggtaaaaNctMaatagtaHgacgMaaacMtacatHtaSagaHatYDccagtBt -gaWatcYtVaagataattgatcgacctgcaacgttttattacNMWNcattataDVDacta -tattatYattttgcgaagtgagYVtagYaWaHaatctgWttttatgcHaacgttaccDaK -tatagaccaDDttaacgtHBaacatccgtYaBtVtNccaaataaaatVactDttSKtcMt 
-DSgaagctaMtatattgattactgtNaagNBcagHaDattaaaWttacacaaatactcaa -tSDatagctcaDttWactttgaStaaDtagatSaaDtgtaatKtgVataggaagWSaaaa -KatttaaagtttgcgtaaagcccggNWaacatacatgttctaRcaHttVtcattatctag -ttttNcataaacDttWaagVtNYtaggctttggtatgagaWgtactNaVatcactVttBK -cttaaccttcMtatcggtaataYaMaYggttgtcaaagSWHctaRMSatVcggactMata -tccgaatcttttttcgagtccagtttgaMtcgcatcaaKagtattRMaaaKDBttDNcca -tttttaaBNtVtccgtaatgaKgtcagMVSattatttaWaattttaHNcaaMaHttgtgg -ctattctacDtgaagattatcgacaaVRHttcSaSaatactNHWaaNcgtWaWgaccgRS -ttNtHtcttcKatYatatBaagtcgctBtgagccatatScctKaagaaKDaWactWagBg -ctgattBagKtgaaataBaaaaagSacScaaagagtagcgaDaYtaMcaYcKtaataMat -ttttaactttgYgtcgaaggacgcHctBcgYgaaVacRYagagBaaYgtagattgcgagt -caagtStDagatBgtgaccctaSWtctDgactaSHttctWatWttctaWtatctYacact -gBWatKKctgtatYgacaaHSatYSaNgSagtatagatgagtatttatgaccMaatgtaH -tStaWttgYagccaWattcagtBaYtaaNaBtaNatactggcttWcaagatDctacggaN -ctatcacatSgKgattgacgacccccgagtNDtattgagaaatattaatcVttNKtaaWt -YacgSNcBHgttgWtatgtttcgccaactKaattaRgacgNataatctacaacKgttBat -YatNMSaaaNtctKgacttatgcttatKtcaVtVcagDaataattYgNtRtHaagcaata -HcacaVgtaNNHtHDatgttaMNtggWagSVaRttcMVDtcttWgtRttctacKaaVttc -VcgcatcctHRKtattSgttSacgaagtcccRDVaacBWagtgYtKtgattgSgaBtgcc -BtcaKacaDatacttHatcattNatttacgtcagtgaggcBtaRNaRcaSgcatattatS -tatgctYcacgtattcattaaRtgStcttWgtattKtSYttNaHaRtNYcRaYtVtggtD -cKcttctactaMcacggcMtacgcttctatatHtaatggcattMDtaaMaKattgaagtB -aaKMVMNacKaDtttKNcgagctaaagtccMMtgagaagVaataatggcaWaaaaVaBgt -aSaVgaaaSaaaataDttVtBccaNagcSBgaMaDaVaVYYRVBgttYMtagtaactDta -agWaattBtattttMDYHtSaStScRaKatattacacctMttgNBKtcRtRggNagtYMa -ttaaatMctYgaatgcKHagSggaaaaBcaggtHtatWcatcgtStagMcctcatgatta -WRcStcgWtgRgttttcctaacatcgctcgDDtRaatatMgtcMtHtMaDYatgDattta -tagctKDtYHaaaaattaSatatctggtctttattttatMtgtYttgtcatactcaaVcY -BgatgSctKtYcctWaRaataWcMgNgcgggagtcttRMgactataHaHtgctNtVaatc -aaccacgSRaDtgKtaaMSKgtaaaaWaKtttVagSDtaaaaaatgttYattttNagMHa -aRtNgBttWattatatgcttatatcatttatKtKaaaagctRaaatcgcYgacgNtacNt -ccVtSaaatttcDVctaatacWgcaMtcttSaaWaaaWagtagtaattaactagRttaVc -SaaatataacHgHatWaattggaagtgcgSSgaaVtgYgSttccatWVataatcgaatat -gHtRcgtBttcttaaggatatgttgtBcNtaatgtcacVatactgaaatMBttRRcRatc -catagagggacatcgccWttagttgWttatKagtaaaagHtttccttSatVatKtgagca -atttattaaYVattcaaattctgSattRaMtgaatMgttattattacaNcggVagcctta -aKgccYcaaDattWtggMcttMacWttccMVgtgaattctDaBYgacttKYtBacatgct -DcRaaKaaRaatatctttagKcKtaactttaatNaaggctgScacctYgcgcaaaccaHt -tVHcBaDgtaatHaHVaaatMgttggtSatHtNNaaVagtgtacaataaagacgKttcaa -aWVacagctcacWHaatcctgtBNWtaNMKcVcVSWtSgcaattctgKtVVaaacaRaat -tgatRcgBacaKacVccVMactagcgMNaaactgataDaSgagaatVHaatVSVtccgga -tgRgtagRatttgtaactaBataVaggcaagHgaaSMSaKgctRagcStNcatttVgcta -tacttcNDtcaKBDcaHtDcaatagttHttattMBgagctgtaaagtMgatStStcagat -atYcBtataacRcaggRaaaggtaWSatKgatatgagcgtgMYatcagcatVttSgaaaa -aatatatgttYttcattatacataatVcacgattataDggttBtRaagtHMtatagaDgN -ttggDaKctBcaaRcgattcgtgccttacaaaWattYWVcaaWagDattgaaagggaaga -HattBtatVggtaHtWtaMagtccagaKttSatatcaStDtgWaagtKWaggtatttaWa -aRcattaatStgaaVtacggaacatKctacatHtaaaBtcNWatttBBaNatRcDattcg -aactataaattataactcagtSgatataagRaYaKHctggtaaNtttaaNgaRHtttatt -atacNttttaDccttYgtaaacaggaagtgataaacatBgaSgtaaaaaaVcBgtWNtRM -ttBttaaBgtaaaatatcHNStaBtaggtaVatYaccNtBaWagRctNSacRtMatDact -StVctaaDtaYSRgttaRNttttKggccagaaBcatagtYcaYNtDatcgtatVcaatWR -taggaattMcatRtgggatgtcMggMtttataagtaBgtggacNaaKYtgctWgagYtWc -ctWtVcttaaactaRacatggtRcatctSDcHcMgcaactttttagttaccttattHRgt -acggcactDBggtMHcVaaRatKctSHacctacaccactaaHaacgSttagKtKttttgN -HVgagtaYaMtVYNVcggttaSBaBtaatttSRcgtBgaWaatctttttKggacaWKaat -tKSaccttgDRgtcatatDatVMtMaVcgaattaNaagMWccctaaHgataatatgtatt 
-WataaaatBaaMtgRttcHctaagctaagatatattMcggactaRttttKaSttactWYt -gBcaMMacRRgNtactttaaaSKtttcaYBaBttaVagtRtHcWaggaVccttNgtgagt -catataWttYScMtWgVRgattWtaSggacggWWctBHatattataaKaagttactaMRa -aataSRaDttDaaatataVHaatggaaBDgWgHtcKStVcatHtaatcatggBWaagHta -gtMtgHcHtcatggggWcatacaHNHagcDatRcaaattcgcttgDggDNVcaacgSgtg -gcaccttMttaatattVYtVgaagRttaBcagVaYaHcaRDBagatgaVHtNMtcttact -DaggMgMaattRWDcctVtgagaaaaSKatHHttVDgtctgtcacatHNttgaatSaagt -KBatatagacaaRVctcWtgtacKtaacHtgHataSgVtactaggtttatggBgtcaaaY -aDgaaaaaatcgMtagaKaYatgaattatYcttKtacaatttgWttMaatBgaatSttMt -NaVgVtScgcttctBHKgtaRcNBaatcDtacgattgacgtgctatNaaBtMgagNgKct -tWcWKacactYgttVgNcgaattttcttgaaaaactacccctcgcNtgMctatcccacMc -actcMatttatttagtagaacMNtttcttgYKaWtaaBtttcWttagHtgtttctcttgt -ggctatgDgctaatWDataatttagaNcgcRRNataKtctaataHgaaMYctNaKWtact -aacDtgaVcgagaactggtaccaactHgaggctagagHHagtMgKtaaactacaggMatg -tYgSBaKaaaattMgatRtggggtHBVgttaattgKttaaRDacgMactcaaacStaaag -ctctgtgccttcgtSagtSaRctacaataKatattctaVgtgtaattRacKagttattga -MtaatgaNatacDataaggactttccNtStatatKaagaataKtatggtcctctatgagg -ttaaDtgtattgataaaactggatcactKBtttggcgtcaaagaaaNtagtWKatctaaW -BactDaBaYtacaWtaSgcaattattWgaaBgactgaKctatBRgtagttaBaRRgattt -aagBHctStgtVYRtaaataaagtMWtcHgcattcacaaMWtcMccWttgVgcHaWttca -NtgtVaggNgcVatKttataaWDcccctatgatVttttattacagRBBWttcttRaWgaa -tBVgcgtHgWgaccagtYacaattgSttaaMcVtDatttaVttRgttKtcaYWatKtaaD -tttWaYtaatYctSctatagtcctBtccMaMMtaMYHaSSgKaaacttctcBtMtgDtgt -ttttagRcgtacttataHgKtNtMtKcBtaNKaHStgSagYHtataDtcKtagRtNWaac -VgctVtRtttStNtgaaccttaVatgagaaggtcaKSttaDataagcYaSatNStcaatD -NgttcgacaatttaSgaRaBNNacattRatNtgSttHVtgWHgtSHccaactKttYtatH -YttVtgHcNgactMcaacttBatatgSgattttacgtatttgtggtScaacggYtHtgca -tctatttttWtaSatcagaYatcgcagtgtgtMgtattctttcattaRatttStcaatat -gcttDtStaaagaccDcVtaWNcHYtWMaMcgaacKcaNcttacctaBtgcDacatcaHK -tRcDaaacataaRacNNtccDataNactttatBSDYatDtctBtaBatctDatKaMcatt -MatatcDHctaagRgYVcatgttcgtgataHDYaagttSgHYctaaatgtaaaactNgta -gaaactaattRaatcttttBKcgaatSctMaggVaVaaatgagataaataSgttKgtcat -KaKatDYtaaaRttYaMtgctcSatRtagttttagcaaNtaKgatcgWYcacDgaatcaa -tactgBgaNtaactaaWatatacaatacactaNatcaVaKaaMaaaaaatcaccBtgttg -NctaacaBattttaaKWcaggataWMtaattgtaaHtgVtcgaHtScaHtctcHacVata -gtaMcaaKtcccSagMYtWcaaatHHtaagRttDagtMtcYtttaaWWaaaVaRtcHNtc -tcSttagcacaKttgtagtNgWYtatKDtcatttgaacctcKHtatccttattcttNggt -BgtgtKaggWtYgtStgtVaRtaRaaagtagtgtcgcKtKagatgagYtttaatKcScct -gaaaaaRaaHtttttaaaVgtatagKctaNtKaSVgttcgagacattttRSatagttSac -ataMtaYHccacttttctatactagtatgaBaagctttaMtgaatgtcaKYtaaatatgg -attataNcgBHatcctaRaaactgttgacttYaHtStcatcctDaMBttgtaWgagtaat -WKataaaBgBattcttttctttaatWStaatacgNaagtWaMaaNgactMtgaaDaggaa -aSctaSSgatatDttattatcatagBcaataVcHcRgcStaHaaatWagatHttMHacta -RacttaYaaaaNtataHKVaataKtatgatcgtcVaaWgttYtVcaaYggctRWttaaKt -RttDaKtgtatcaattWKaatBHaaaaNgaatggStHgVVgatMgBYtaRNgBDttMcNt -ggaNgtcaHtgttDcNaggBtatYtacVaNttctcWtactHYcSctgtYtDtgWaatcHg -atDatatcHtcttatattaaKaRYaDgaatgSYcgactgcRgaagttagtStYatYtttc -cgacactacagKcaaagDttaatVatcttaaacRaDatRcBatKNtNtaaaHtcBgatKH -cWStSRaKaSMgtaKaBacWgDDttgYaaYttaNtDgHtatSaSataaaaMBaaDtaMat -DaagWtggaMtRcacttatggctNataaaaatatWNMtacctatgtcaYKaRacagttHD -agccgtaaYcaatataatcatagggaaSatgMYBcKBBtaaRVRatRtccVtgtgaagVN -ttcttagtgtcWataVggtaaNaatVgVaKctttNgtttagtaaagBatBtgaYSagHtt -SYaacaStcgcagaSttcDBtKtttggtctacNttgNgKNNtcaaaaKWactgaaYgaYa -ctatHtaWcaactgttSatNVtgtctSttYctgattVaatKgtaYcaaattSgttaStat -ggtccaatgSWccaaactattgccgttacgcNatcHctctcaKatgtagtctattttaag -gHRatcDaagSaVgaVNccaBKtacgtttStagKgtctaHtcattaYcctaVKtttaYaa -atYtccgataaaVttcDgatWcgBtcctaatttNaattgctDYgtgatcaatttaagggc 
-tctcatcKattgBtaBagcaYcKctctttNtaacHacNStggRtMatHHgtacatgcaMa -gtgtccatRWttRKctaaaDtcMctttaNVgaNtcMatcacHcctgWtaaStcacgtctN -aagRNNaagMaDtactDgctttttcatcYacttaKttatgcStDaStNaMgDtaacKtMt -acctaaWattggtttNaaVHatgaaattaattacgVNaaWtggaWatctgVatcacYctc -VHMtVaNacNtcccaWtttgcaacctcWctHaatcttWcaaaYaBaattSctYatctaag -DgBttagtaSgaWtBcRcKtccYatatcKBgtctttatgaaHDcgNaMatggatgtWagR -ctStagagaagaacagctWtNtataaaataRatHatKgctNactHgttRgRgVcRacatg -HYaNttaHtattaNStaagatgtagaHcVctcYgggccYcaaaatgatcttctagctctH -MaMMgcaVtgHgtaagaWHHtggtaactBcaMNNctagaacggWtctttgaggHcYNaaM -HtaYcttKaagtSccgttgggNMStatacDttataaaVaYcKtcgcattttcgacctctc -acVttNtttattgtcttctaVcatagaattMttgtHtMgacataaatagttctMtgtWgW -ctttcaagYgcgtNaagcaaDaVHaaStMtaaagccccgtgVgtcacatcHVaDtgttBt -BacBtcggYttDagaDYtccMttagcttacNcgaagatRtDataRtgctaatatatgRtW -VttatWKtgcBgactcgagaSgtaaaaagttaaWaaagtatttctcWtatcBtcataacN -cgctcRKaaDKactRaNtagtatBtgaaatttcgcDactttaNtYgagagaNttgaatta -ataaaSMattRHNtYtgttgaDBRBttgWttagSatgacDggNVagRWcggctacDaYSg -aaattHgtYaaagctccVtatacattaMctttgSgacatBKaattRgtaBRtttaactat -tctagcMKMtttctgtgtgVgtctttcDcgtaaMtaggtaaaDtcaYtatccgattcYtg -aaRttctKaNctaYgYaattYgRttWctWttaaaccaatcactVatgcgYttgaaatgat -KBcNRgctcatgaccHagcgaaaatgtVgccatcaBSatKccRStSattaaatttggtaa -gcVattctgVcattMtacatMgaaaaaataYNDtDaatcatWattcaggNcaccctcBtg -cKcHagYtatBatgBttgtVttaYBgBgataaHNtacRtcaaBaKcagNtcagaatYgtt -WgggaNDagtatagRtctcDtDaHScagttcYcatcSYacHcagagNgtgcHagtacagc -tgRtatatMtaatRaWMHgaaKacaBRtagHtaaaNcVHcatWBgWaaacWccggtaaRc -attgMgttaNgttVMVttgcaagagaatcaaaaaagYScKVtgccgacHgacgttcaMcc -tcattatgcBttttaagtKatDactccgBatHYgttcatcgaaatctSaKaagaatWVtc -gttgtcttaMaaYaSDtaaaataccgcKMtatgKtgScaaDMaaaactgtgagcVtttaR -cttgtaNMatatatttggtMgYVatDaatttgctttaaRtaBgttaYaaagKtataMtWS -tcHaaaaNacgctacMttDDgactacaNaatBcagtcattatatSttaVgRtWgSggcaa -tSataVgSYgctBttataaYRRgaactgtgHtgacHWSactYNgtttBactatWStaNtc -StcMttgattStacctgaattctWatNaaHgMatattcaaaKWaBaataatHKgaWgata -YcaWMBtgtacKagaaaaagaattttWttDaMtggttgtgaNMtVtDcaacNttactatt -acggKctatttaaaaBKatagttHaatggaatatYWgtaVtNaaYgataatMaccWagag -atRttMtgKaMcgatattaacaagatgttBBcNaYattcNgtRttgaBcctaagaSMttc -MtcctcYattcaNaRBttaatgVcMNgaacKagatcgNctaWVgttaaYRtgctSctaaa -aNtttgctaaScttcVattaHtaaMacNgttNtKHMcctattttaRtttVtSgtacatBg -tVaaSSaMVaRBcaSaRHtaWtWHttMtattVcaMtWaaaNaccccgHYtcatagaaRta -aBaatttaBccaatcRctcatagWgcBHRtacaaDttcBgaHggcgctaHtgacagcSNa -ttcctcgagaccBggtcaagWctgVcRDgVtaagtttaattatcMtgatNagYttHtYta -gccRatagDtaatcNtaKtacaMSgDaaaatttgHaHtRDgtaattKtaMHgaBcaWtBN -YaWgtttStttaSttgataatgactMKatHBtttaVcYatgggttttaDKcSatttMata -tcagtYaBtgVacaatHcaDMcccgtaataatagDataatVaaagaagaVtctccgaRgt -RtaatcgagtcacttgttSatgNDHaSNRcggtaSaagcSaBgWSgcatcaaWatgttac -atgattcWacMtagtgNcacgatgatttttRcWttSgtaatMRRBaacNWRHaaBaattD -aagStgatccttcaDacccctKaagScSSHaaYHWcHcaWcaaaMBataattgDtagccW -tcRHataMNKtMgHaBcatcgaagtgtaRgtgggaVMatgttaWRtStBHactaaRaact -NctcHaaaggcatgcVHKHgaatcSccttggSaWatWtNcaaBctaRagaaacacgcttc -KatRattcWtgYDaaaaaaNatWtKgaacgtNttactgWHBaccaWacggttcaaVgaga -aacVtMttatagaagtatWtaaaNHYaMacagWagtaatttgcatcttcgaatacggaHt -aatVattctaDaHtRKRaNHcttacatcDKttMDKaWggDtaatcttYctcWtRaaaaKt -aatcctgccccatgcgDtctaaVMtWRKKDctaatatDgactagWtaaaBcKcacMactM -HHttgDataKHDaDttHttatttagtcaaVatccKWtacWtSVcaggtaatatDSatgcc -tKtatDtttagacKaaaagcgtttaaSaaaYtgattgtKtgBMcKttgDaaaagttBRat -HgcaKgDgtgcWataatMWgcVaVatcYgWttaDatcatNaVgtttgggcttgaHRDaWg -atttctgMHgtVtgccttBtWtaatcgttcgKgRcaBaRMtaattWgctaatMaVBccaH -tDagaBNaataRcacYcYcHcatBgaNtgaNgKHttctYaacaaaYgBttRNtNggaagc -WtDggattgagtHaWttVacaaaBtgttaNctaatactKaMaaaaaDtaRatttDaaagN 
-ttcYcaaactcMgaYgtacaaatMaaatYtcacVaacgaaDagatWgBgaataggtWtKa -aMtgDttHtgagttaatttgVaaDagttNMataatttaSVattNaDtKVccaaatcgaYV -taaaacKRaataatgaBDtctRtgVcttatttYtgaHgttBWatgaatatacSaacctSa -tNNRccagtactKagaRtgSKMcgaaDattttagtHcKcaaagtggtataaaggctccta -SatHtaMtRKattaNRcWtccgctataKggatWttaggtaatHDRatttattRWgcgatc -ttagSgtcttactatgYgttYaVBtgcaYaaRtDaatacHHtDcttHgBgNcccataDta -aaaatctNtacatatWaRMBgaattaaaacgctctctcaagtKcacNacgVRVcttttta -acttgctcStatRScaRaMataNaKagtatcattRttNaVatcKgtacNatttttgaNcg -acaaKctHWtgaKStacMaBatgWttNSacaaKcaDaatcWaKaccgYBggMScgaMcct -agcaDatgtttcVatgtRBtKNWHtcctWDtatttttNNSaatattcMttgatKgNgaNB -atcSggtctRcttttttatatggtNttDYNYgaaaKctcacacYHRgttacatacttYac -aataNaagaaaagttataNaataSatacagttScacVaScaccSWtccagKHtaatcaaa -tVacatWacgBctccaataHaaYtMtacKacHttttKtcataWWtgtgaatWaataaaaa -catttcaccttaHtttgttccaatcccgRBaWgatKgagtttBaVgaNtaNVBgcaataa -gaatagcaKRttgtatcaattaMtaacatataDBgtaaNttcaNcgagatYactggttat -gtNVtaBNtDaaDtDttaSaWtactaVtHactttNttcttcatWttcDatKaacgtttgg -VDaDtVagttatgtcagactKaatcaYtSgttttataaataDttKttKagacWgHgatat -aaatcttagatNKtttWtWaaatattacSHaRgtttScttaatWttacgRRaaMactcat -BacaccatRtttgaacctacttcDMggcVaSBagaatcttaKMagcaVtctDVataWtSg -atagacttBctDtBNWgtgKatWctYgaaStccgVaaaDattYatagtatcaacBaWYct -gaaatttaKVgYtStNtcaVggtggaNYgaRtMaacataSttcagacVactcaVaagtgg -tattaaDBNDaagtatatMtactatatgatRSgtttgccaacgcacRMtacRYNataaga -tcMgttgatcataaacttVcatatgWtacaaaWttggaaactttaScataactRattMtD -acVYataaaagMaattttKtgaBttKcaacatattVtagtcatgactcgDaacDtaWcta -tRttSSYNtgWaScaaataagaaatKtagacataatggNaatttcSKtVWtgacagKWat -tcgVatttcKWgagcaWgNKaaaatatgtaaacgttcactaaWgacaccBNaacagaaSt -ctgctaHcVtttMtcYttStagYcgtttBcRtaYacttgNaacMtDRtagcatgtgcgag -cScaMgtaatBaKataactMttttattaRcattattatacgtaagSNatVRgcttcgaVa -acHNtctaHBKYgKaccYcttagagcccaVgatttgttagactaaacgtgcaBgccaWga -VataggattDBWaattttgtBacWtttttaatDtMgaactaagcVtctcagBMKatgatt -gaNaVttggatDaSaBatttcgccatatgctaattgYacatgatccacaaMHtttcKYKa -WtYcgDtNaaDccgNaNcacacHKttDtttaggctagRVtYgtaactagctttcacaaat -YtHaattYacaattaMSagMactcctcatgtScttcaaYtataaaaScHYaKcaYacact -VcacataNtaBcaRatgYagVBatttgtaactttgRggacaagcVacctattacRcaaMa -cHRagagtaVNctacagtgagacgaaaggKttacattgggacaataKNtattcaagWKtt -gatNagNtgctaNgagatNacSatctNatttatctatRgaaaatKatNKSBcKactatac -StcagtaggtVtcaaaBYYgctattKtWNttcRacaaaNatgaacttaRtaaDSttVBYt -aatccagtNaaacRttagaaccRBatataWaatKctcattcSacWaacaacactDttVtt -gacYaagagtaSgcMttBttaVNgRVagKDcttcttcNtaggttgcgacYacttaaggVH -caagDagaagataaVaatctgtatRatDtKaaSDgattcaattYtcatgYgtgaVMtMaa -ctaagaatgRgDtHttaaccaatStaaaaMctVDDtgttatcttaBBgccNacKMaHggc -BMttctgNctHggagaataYMgtaMccaataattHttYttKggtKaccaactcccHtMSa -atNactcRtttcatgcKcatgcacttcatSaatatactttVtaYttDattgWcctcactc -YccattaDDaHaaKcaatSttagKtWtcatRcaactattaattYaDggKtagtNcgSgtt -tKRgtDWtVHtDNcHWNtKtccgtctagtatSctaBcacgcaBtaacatgagatVtttaa -ggcaVttBttaStWtattgYaggtSatBMBDactVtggttDagacataaactactBgcac -aacMaagaStccaWNaaSYMYtgtaKaMcYSaHaaaatatttMgtcaaDScaKtcaBVta -MVMRRDMtcttRBgWctaacttgaacNaatgttWgtggBtRttHVKgKcHVtatattSaa -aatBttcBtttcDgHccBagtRBRttaVagBctRcaagcattacKccaWVWtaVcggtta -tNaSgccgKtYcBaagcWgcatgaNHaKtagNgcHcgtgtcataaaatagagacttgHYa -tattctaBgtttatRatctatttagacattttNtWaaSagtaHatRtctcggatttatgt -gatBtctRggggcatWctaSVMaRtcatgKattgRcatMaHaataNcBcDcaggcactat -tHBgaatStatattcatBgMVataaSacVacKHatggttaaBKtgtaSaWMattttMacK -tgaaWaaWgctgRatgtgDacBtSaHtDgtgtMVttagatgattagagaSttgattgtSa -aacagHaaatacaRcaccBtaaDtcaMtKaaStttatKagaataaNcaaBtattKaVNaW -aNactagtYattaaagWgHttaMcKaSagatSactctatMSagtggaYctcacKKgaSMg -cRgKtgccagNMataatccaVgatcttHagttttcttaaccataggggcttaDtYatcga 
-aaMataagcaaatBttgHHcHagacagagaggcacWtacccMttacgtgNttattYctVa -aactgttaagtKatMagttcacaaagggatgaVNMatgcaSattatcKagtHaBtgaagB -cggagtWttVaaDaccMScactgVatccaRaSatattNtgcBatgBaaNgtcaBMgggaa -tgagtatRgaatgtNttacaggcttaHaataaHSagatagtgVctattaaagggaagDWV -ccatcKaaaatRccccaSVaaatttMtatStgtWagtStMaaatBctgcctKWgttDDaS -KactctaaVRtaSWcVactggaaaaNMaaaccgcacNtaVgaagcttDNgaDBtaMaMKN -tKccaVtgctcttMMYaaaaHaattcWgHcgtacatWaMaaKtaataccgBDaYRaggat -atSKcScYagMtaatKHMtaaccatgHgtagDaggtgtaaatatagaKVgccRYctcRaK -BKWtgatHYcaHgBaYtttMcatataatgaDttcatttaStgtcVSgacggtggVgtBtg -acatgtaaSgtBgatKtKtaYcatVtNattataaaHaSccHaaagctSMKattcatagca -cagtgBRataacaatMttKcWaaaaatagStcggRttaattatWaataatMaYagatgVt -atccttttHaScgtBgagWcatgBtgcctatcgtaaWHacagtactgaattaaaaaNatt -RNMaSSNSctattcaaagccVVcatattttagMcgtattNtVBactacScattgKVtata -aKtttgNaWcttNacctagtgaNaaDcagtaWgKggaaKtacgcaaaYttatacSttgYa -YttcDNagggttVDagHatSgtacYVatataVattataSataacgKgatVtVacHYRWtt -atcctaaDtgtaaDgRDttttattWtaaDttggatcattNgtVaaaVggaaggcYgSWaa -attcWHcgaSaVWaMatctMDtHBgttttaatctaWaagatatDKtVttaccgaMatRaa -aBttaNagHatDHWcDtBVttaatKtMataYttSRHHcgtaHDtggttccaaagRRtaWt -VctRcaNDttatacgatMcaatNHtacgaattBaatHtcccatctctccBtgtataYcta -tgtcgaaDYWtNggatNcacRtMaatNtKcttSYSctaDaaaggctDaStatKtataBgc -VaatttggYcttaaatgatgtHctaaccaactttgggttcMaaDattatKtVacgVcSca -actSataSccHttYctttgtggcDtMcactaNSBtMRBMaggttWKtattaatgtKHact -tcaMVatctgttgtccaaYNtaagttKaacttctHcgcWtYttatMBgBaMacaattaDa -actNaaatSatcVtSSgatctatgNatSYaattRatgcDgtctataagagaagRgatatt -tcccaataHgttttWKtgaagNRtctaaBtWcHHcDgaattgaaaKtgttaaRtatgtaM -aggDttcMaccaMaattDctgYctaWtStaNtgRKaBtNcMHcSttMtaKccYacgNNct -ttatStgVtaYtaagttaagaBHaaStVKHatgttRVWtataMtSatgcaattcMcttat -KgMcagtgaatcYtcctNaYcttactttctcttcatggcgNcatScStBtagctWtHaaW -attaccgtctcgtBMcaaacKctcccaacttBgtWStVttMRgKcVagHttVtaagMaNa -tcaHttacatcYKttDBtatgSattVcgBcBVYttHNtcatKgcYgaaSaKtatttttMt -ctatctaSaattDttcWagHSacgttagYgacWaSaDKatcNgctaatgVSctgctYgaK -gKtaataggtggagcgtcgaaaaRYtgYWYSaatacBgacWtaNStcaattWtRctttta -aSYgttcNgtBWWgtgaatHttttBaMcMtKccagtattttcgaHaDtSVgatgaacatg -cacgtcagagDYattBcagDctcttNcNtaaaatRctgMcDacaagtttagtcaaSSaag -aaacatacaDtctctYgcaaacBcaagaBatgtattgacgagYacBDgttcgtgRtaMga -attttcNtgVcttctgtctagtgtccatatctgatYatNtatVWgttacaDacaHDDagW -tgataWtatcaaBRatDRtMgVcgaaattcSMagYgWacgggtaacaaattcagcatagS -gttactBctgSVWatYcYgcBWgggRcHtataSaattBcagHgcgcctttKcttWaggct -ttaaDtRacBactaaVaaKtaaacctcgcgccattactKactKSDcgacaVtatatagga -taKctcgSatgHSatVcgtagtgaBtSYtgaBataatStaaccaagttcaDtHtatatta -acYatattatcctacgagatcaccgtVSttctYgtcataaVactcgWtaVatttgttgga -ctaaaVcaSaDtYcgNtYtctVaMtaattatWRtWcaNtaKcaaYggatgNgaatcaatc -RtcgagtHcgVgttataHDcatttaagttctHtcgMRHtaaagaVactBMtatgaagtaa -aaaBNtataaNttcKcctaNttaaDtcgMacgDcaMatttgYtaaNtcaccgatgagMtg -ttaggWcacHttNgtcttHYMcaattKcagttcNcaaaacgNaaSattgKttaaBaKtta -tttaMggHcttttaaRNVgttaYttttMVRtYVgRatKcgVtacgaatttccBatBgYBR -tSKKctaaaatgatatgBtcttcgtttgacHagtaattatatctgDtBttatgaDtatKt -cKRcRttagattattagHgDNaaaKgcgMtHtttKtDtgaaaagtaMatcagaaccgaat -KgtatatVaccRaKYtDHtcSagtBgtgccWaaaggtYKcaHatDDaaattDStDtcKgg -tMgcMtgtHtcaaVcgtttNtagtNtgKgctaDcScgBcWSatgtatagcKgWgttgaac -gagtgcgcgtKaaaacgRtttccatatatttttMgaKagcVcVRataccWctctcgBcga -ggcgttaatgaHYtttHtaSWtagcagtttKtYaacaaataMtaNDatRgMBaBacSaat -aSDctgaactattgataaRtaVtttHatWaacWtVaHaaBDtactYtaDactttSgtKtR -attgatttatatattattataattBatagattctaacDcRMaaggttcgtcatattRVYc -ttKgtRcgWaatcgaaWWatDctacaaaagaattHaatctgttttacYatKatBaccMaM -aaVtcacStaaYgYKgtttctcattatattNgSaaHtgRaBtcataKYtHtacttgtaca -aaDtYtgatagNRcYatgaStaaagactgtcWDtYaatVaNStagaaaWtaaaataDYtc 
-aMatSVBVaaaYagaaaattgtgcDagWSaStattttaatNcacgataNBtaattggaat -gcMgacattHaattctctaaMatactaBaaattacaHWgBNtNaaSattttaacHtgtag -tBtcRtttSaNNaYaMaDtatDtagaKggYgcaaSttgctactDcNRtWgtttaVtggca -aactattgSgaagtattatgDgcgtgtcttagcNtRctKggtMaHgaDaaagtactgtcg -atttagatcagNggtaattaKaatgaaYaaHaattggttVaaMggatactctaBgtYHMc -ttccVcaaWtgttHHRgagttKaaagaBtaRtaaWaggttctatRatSgtatcYtaWcat -gtaBtcaatctaatRgaYYtWtccattataBacttWtcctaHaaaaggttgacgtRattK -gaagcattSBtttctaNcSctSStNtYtWaWtgtagtcttgtctttaagNKgaagacgDa -RgtNaBaVDgaattggaYtaccSVYKctSKKcatagttgSttatcStactcaatSMataH -caKgatWVYtNacagtttBtRagYHaagtaNaaVVDgatattMaagattagcatcctaMa -aMctgNtMcSaRcgctHMttaattDtttYttcgataaagtMtaagttaWaaDcaatccKg -tgMMcatBgtRtaHBcttgtBaBggcaDcgaWttgggtaDaggtgatRtYaMWDttatcN -tVcttRaKagctRgtgcNaatctgattatagattagtatatgaataDNatcYaggKRaca -atcaHcaagttagtKgRatRgttaagaaaatacVctaaaagtgtaagKVgcttSWaaHat -agHctagtDgDtSaVtgatcatttaNKgKHataKBctatatWaNgtttgcRaVNttaDgt -cttagHYKatYaVaBtaatgaBattaYcNtgcaBtHaacttVtccatDagVaaaYgWtND -BgacagVgctcaRtaHaaacttttacaaggaSRaaatagaagaatacccVaHatcBRtct -tttaaDMHWtHgacMtctcaagKDttctgYctctcNagaMgcgaaDWatMcMatatttDc -tttactaVSctagttcaRKWgtttKRaVaaKtacaacaKttatttttggcctataaDgtc -BctBDgcYYaatNactcaaRgaRWcgattgVNcWaatctgKagDMgctatKttRatcatt -MaagtctaRaVaattKctgaKtatccgaaRatcHMaaaaaagattccacgtacgaDStat -atctcataggtacgcgatgtgaaggtHYtatWagKVKgaMDcaatttWccttgKgagtct -agatgaatgVRcctaMttgtaRaaYtRtaacWgaaaMttatcatNcgttactaaggtDaa -ggcagtRctcatcaatggYagccagcgaatatagtgttWtaccaRctagatttDtaaatR -cDKtccHtBWgttctWaagccYBSgtggaaagHNttHtctaaattaBatggaDMgaBgat -atcaatactcMtaaKtcYccgatDaYgHDBaagBattWattgatttttaagaRaaggatg -gYggaKttaKtBVBcttaWcttBtacctYaNYttgctgtBaaMtWtcWaagtaaggWcgM -DaaNtccWMWtatcMVgaSaRctaKtBgKWDacDgaaaaNgttcaaaaataMcttaWtat -gNaVaaRataWtgKctRatataagtgttgacgaKgaNgtaHattaaRagSgattctatgt -YtcaattagBYBatccYtgtNacHagHtacVcVacaacaccgNgBtataYaatWHSttat -tgctDacttgtgHgHcMcHacagctRSDtgattaggaDHtYagatggagWtaMatcRccc -acRaaaYagcagatgatacatatttVBBcaaMtctctaWgaNtttcctaVcttaYBDBct -RgSaagcNgatttcacgtcRDaVBttaRaggtaaggHcacttccgDBKgagaatttataa -aMaRattagcVgtttacaaagagaaaMtgDtttYttggcttataKaStacaVttBttctt -gBcVaataaagagtgagBgcgNcNattgaaacRcactDaaccaatWMtaaHtBgaaacaa -ccctcMctcaaatctMWttggttttacttagcRtttacatRtccBttVcatgaaBacaYg -agHttatWDtcctSatRtYggHtNMttRgNtgcatcacgacagaHgtYaSaactgaaNWV -agtagttagaNgatctgcatWYaDacataHtaWttaatHaDgactYgttcaSVtttacct -aatttaDgRcagacaDtgcYVttaagaSSKBYtgHtDtNtcgtcWttDtgtcNtgacKag -cactccDMacDNcccctWataRKcaaatttctRVaacagcaMtataaattWBctttgKgV -catttaVgtDgtatHtgtaSctagtatagcBtBtgtatgtcgcMcgagttctacgaaBgW -ccgaWatgcaRtWtaagYttaNtcWaHtgattYDatWRgRWagtRcHggNatNttWaaac -aSgcaatMatgacNgggaSatgatttcBHcctaaggWactacagaaaagctMcaaagaYt -HVgtaaHKgKattVaWtttcctaWgaKattatgMaattBgaaagtgaSaaaWtSNBtttY -ataVgNatgaSgcBaaccatattcctctagRtattatctttctMtgaRtctcYgaatDtR -cHgcRVtWtaacDtcacYatRcttNgcgaDtVctWtacHtatatgtatKaaggtaNcata -KRaataacacDctcctWgtSaWcatcDgatatBtaatHSNBtcaataaStHtacttaYaD -aMtaagMtgNaaaaNccccgYWHaatgcBcttaBcgtMBKggccaVgacaWgaaaaaVYc -RKctMgcaccWctcSacttcVtacgaagtYtcctttttaYgttattaataactSttRggt -cVgagWRStatKataYcaatNMtacttcgcttVBaYRaKttaaYatacagctBgagcttc -HcaatBaaaVcgctcacaMgttaHaggctaDtSgatattggggBgRMagtaattggattg -YYHtVtcttSRYaacttataBtNKgatVaWSDWacatVcttgttgaagScaDaSttcact -aattagatKttaMcHtMgKccaYatKataMcKNgattgtYtaaRHHcaWagctgtgcYat -MHaatRDgtgttYctatNKtSDtaKgcBttgagtKtacatgaaggcgMatDaWtcBatag -taaaatNYtSVgVatttcaNgRtaRaaNBttggaatVgaaaaagaaggtgNtttVBgcct -tgtgaBtgMgtaaacBgtactWgtaacctatatggaSYattYtVgtttaagccaRtatRM -cgWMgDVSNgataatBRccNagagStHttBgctaBagatattaacaagaggttttcDaRa 
-gtcDgtHttcataagaacaKBttaBgactaRatgaaDYHttgVagcMcBDgYactWgSga -cBataMMcttSaRHgcagKcgaaYaDgttcataYKcttcMWttattaaBacDcttDtttB -catVggttVHtgtMgKcgaaVgtcgMaaHHYBMaHtaaKaDttaNgNtttttaggMcWtt -NaaaDaaaaactRgaatagSVHtaataagttStccaatcHataatacMcattHtacaatt -tctgatggacatatgcaaacaKBatgcagacagVcctccgcaacNatcMaHtcMtaSctg -taYgtStcBtcatDacRggttRgagaaHatVcttYWgaDtatgYcaBKgtSWVYtttctW -ttHtctaYttttaBtcataaNgtBRaNcgttKgtgVKgggVtWatcWagttSttttttaM -aRWtccgttttattaHatttBVtataSctRWtgcMacaattaStBcacggaatRatactV -gaagMaaagWacaMgctaacaHctHtaatacacgaYagtcttKagcDttaKBHccgtaHa -acaKVtcMKcaataaaNaggttSaatcatgaNaaBtacggBcaagatcRgttttHaNgtK -ctYatBHHtaaaDNHtaVtVagttVacKtcYgcattcatacaaagtaacKaKKtaaNtNa -taaNaaSaBtagaattctgacacNtaHtataBDttBctataatagYSctgtaHcgccgaM -BaggttaMHtKgttactaaHaacgDatataaagcaWtgaMtttgVatcKaattcgHVNat -NgDaaYtataHacaaacaagagtatatDStgcNgcRtaaWVVaDStNgtcaaacgDttaa -ggNttWcaVNaccctgaaaMcagVYVaMtBgtatacSacgSgNtaaaDtRaBSaWcNacg -YaggtcaYtattagVStaccgatgSStMattctWtattHtHaDtatgYaatattgtttta -NggttVatcttRcgaNtHaVaStgaagactcacaaatcactgataaKBtNHtttctWWta -ttgactacNtaWatataaaBaatBttgggtatYtttYtgttttVttgagtcVaMVgaatN -taaNgKMaacgtaatattKWggcagtgRttgtgacactaaYacactggaaKaWYRgcatg -cgttctBcttggtVaaWgtttHagtcaatctcggaNWtaatBNcaMVKStaNcMtgatat -aatDYMctttcgcatgcYtHtVNgStggagcBtggMgccctgtgNtVatactgcctcHca -taDBtaStgNcagaYttaMtcaYtgtagatDaagaHaaaRcRataattcaDtcaDgttgt -atRaaaaYaRgtttDBgDcgaagcNttgcVttcacttaMgtMWaYaattcggaDcgaVtY -attaBYaaaattaHVttttWaacDttaRaSWactcBgaRctacaVStBaaatRgaacMSa -agaatagYtNctcaatagctNttaVtgctgtttgYcttaatgtgMaStactgDBagVSgg -tSKMYttDatgtMaaSaVtccSRMgaaaactHaatWWtcatttctDgcMcggVtgtRtca -tctttNatcaatatYaKaaaatKWtDDDaaactaagtacRHtcKttacaataggttWctt -ataSaYctgctVtaaVggatcctaHVttgWtgHtWttaDHaNgaccctatatgcWtNtta -cctaYtttDWtttaggHNgccatattacKggattVatatcRcggRWMtgcaVRaaHgtaa -taattttaggtctcDccaatatgSaaaagatDtaaVtYgNaHBtcaYttaaaaacagata -taaagttaaaDWccMHMattggtaaagtccgaKtatDKaVHaBagaBatactataVttDt -tDaMagctctaaDSggtttgaDacacVatcttNtgatKtVaBStatgNtgDKYcaatcat -aWtcNatYccgRtcgBHacaBaatagaVtagcttgaKagcgHtttDNtgaagMttStttt -gDDKRWtagtaBgtgagtgBcaDtWtaHcctatHatttgttWgagcggDtgtRDRcaaat -agcacacRtDgtgVaWtaattRacVataBWacSYWVctgYtWDaVtaKataaacttKaaa -MVHaaaaKNtaaacttgVataaaatMaaatMaagtatcaaRtatSYRtBtaataattgtt -tgaWtaNNtctcaatNaataaaaaaattgaaaattattgtgttaaYatccccHtaNcatt -cacttttaMgVDtaDMtcaWSgYWcSYtSgaatHtgctagaVattaBtaaaYgatattcg -aaBtgaaDacacatRaagcgggagggDMtatDttaatttggaKSNtactRMttactgtBg -gcgtcatNttctattaVacgttccKtVttMacttWtctaYcacgtaVtaaRgKcttggat -SYatattttgttacaaMgtgagagaSatattWcagDttggNtNaaYtaggaaKtYHcttg -KattWagNgtaagHHVatYatcattaaaaaYtHgttcaaaataattatBgcaKWKtagaa -tagtatBagaaMtattMagaMtHcWgYcacgttagtgtDNggctatNatRcYYHtaacMa -SStattRagRcgataaaatWNNatgaaatttVtKcRtKtDtaaMcctccaDRcaHtBSWc -YtaKttcacaaMaataaMaactccgSgtYattDtaWctagatBtaatSgatgatHKgttg -caaaaagaScHtgaaHRDatSagatcBcggcatcatYVaatgMaatStgNgWaaaaMttg -cYaaagttSHaYgaaatHattBgtaaMRagSaSacBaagtttttcatgttaaYcagYtgK -tYctaStcaagcgtaVattaNatWtHgtHKNDtcNaKaVaStSacaaStttagaaataat -gcDSatgtaKtgMMtcaaagtNattacMYgtgctNgVcaaNcDtaaaVtYggtaaaactg -caagaWNcaaacctDSaaSgVaKtctcatataMtggBtaRttKtagRcctgttaHgWRaa -ttgDaaatttHtaccagctcagaccKaaBctaagtatWtaVagBgtttatgaHaaggatt -StaactWacKDVtMHccgtacaMWYctDtagatttRctaccRagtWcWgaaaaMcagttc -tgacSctaaaactgaatcacaNcaMWtWccYgtttNaatttggttaaNtggttSattttc -aacgYVccMtcgaactBtatcYttcYDVMttcgattaKWtatttagcaatatcagWatgc -RVaatgRtacWaVVBttggatRtaNgRagttDYataacDVBcaaactttgtttgaccata -gHMtRctaWcgacagtgcVcaaVgRgtaagtRaaaattSBKacBaatcagaatgtHattc -aVRtatVSSaKatNataWRVaaagMaacataDgataWHatcNYcMtatttggaSatttcH 
-cgacaYcaKaaatattacHcSaBVatHacactaMDataaaggcacaacaSacctgtaaRg -tcccaaaatWtDtagtcaagNtttgatDacDgcagaDcWgatDaaKagctDWtttatatW -gDcaaaWttStDtKtatSagVgaaKtaacgaccgMgaSaatRRcagYtgttNDggcHSca -aYDWtcaacgtaHgaStKtgMtRtaatccagtDaaacHgtacaaccHtagataNaattat -cVtgaKaaNaaaaaaaagttgMgtcRaNaacagtaKcaBgtttgaMDgDMacttattatg -aDgagcgtcacaaRaagtYaggMtaaactagaacagVaMWataggtatHagtttaaHtca -gtaaatgRgcatgRctgaMttBaaataagWVtcHctgtgtYaaaaVtKtaSaaBatMttt -gttatattattcaaBYctBWtggatBtgaggDagtgcacVataRBctaBaaaataatttt -tNggtccgtVaaaaataaattBHaatBaagaHgttaagcctaatcaaatgaYtKaatYta -aggtMgaRggtWgggNactaacgaRaaattSttWtaataaVtHgtacttNtaagHaSacg -WggaYggNStcctgacgDcgHggtttHggtNtMtttDatNDgtgacgtatacgatKatat -aaacaattSaaagcagatKtttSagcaaMttttgaKtMtagtcRacctKSttBttaatMt -gcgttacaaagaVaataattcaSaaWcBVacYKtacaNBaaKRtRtcgWtWBaRKVtYWW -WgattgBctaaatKaattaYtMtSBMBHDtBtaggDtcNctWYagtgSaaBaVtcttNgt -cgttHtgtMtctatKtatVggKaSaagtttattttatgtactactHtHttSMactatHca -agaattVataaaMKNtaMccgtgatDaHcaacttRataacaNgaatcSBtatgacBcctc -gggtaatWaaWtacacaattctRVgattctatgtgtatHagatagggacVaattttDtNa -WKagtatatHtagacgaggtatgtcagtgagHccccaatNataKMBaHtcWgctagtgHa -atBatSataDatatcacccaagattttcSatKgatWtgaagtcBMataaHaaMaattatg -cttWWtttcgVKYNBattggtacttcaaMaVNcctcHatcgctVcttKatgtctctBMgg -acatcaggacSgaKttgagtctKVYaaagtaaSgaaaHaWactgRattaaBttVaHtgga -ttagRWtaaDaaatgatttSMBWMaDactScgRYtgaVagNctgtSBataKacStHRatc -tVgBKaggccaRctaacttcYKtcaDcttgaaacBBataatacYMgWgctgtacttttat -gaSaaatYcccgattattRStccaaaBagaacaaaVtttgcttatagaaacacScccSaN -taaaaBgtaaggcDgtSttRatMYSWatcgtaacgtStBagttaVaaaScccSggaMDBS -gcaaKaggatatacgtatgcWactccgVctNttMaYtaaatKaaatgKStaaaHaKatat -gBtcctatgtVaBggaatBcgcaatgagtatHcYagctDgtWaaccagtatWWtaRtKag -atagtgKatatgaaaggcaWgtNKaaagataWaatHaaaaaKMaaatttBtatHtctNac -tKtBVVagtatcacgtMgtgttaKtaatcgaaMHtYKNcMaatgcaSaaDBaaaaagaWa -DtWMgaacatttDcatttaBaBtDttaaSMtcagcttttRWWaataattcNctactWaat -NaBaattaagaaacttYRHaccatatKtaKcNVgttYagttBtaaaaVtctcgVctagct -cgSVatatagVtMcaaHRctaHStttNtcattRaatgtaRtgttaatYtaagcattgaat -ttaKtctaKKgaaggtcgMctttcWaagcgWaKcttcYttgtgaRaagMtDatgWgYaat -aKaatSWScatKBtYgtaagagaVcacgctHStaacaSgBtgtaNRYaaWtWcKgaccDt -gaWtgagMaYgVVgBaRacYtcKgatcagttgtaKcgttgagNaStctggaatVtactaS -NtaaagtaatcaattaaVaaDattHDBaHKDHctVggcaaacccKMaatVtgttacBcct -StgBgMtScgaaHcatgctMtStaVttttcDaNagtDVaatYcggaDaYtaactaNgtcc -aattSacaaaaRgtagaaKgtcRSNtgatBacccttMtactatKgtaaRMagataMatgV -tVKagaRagtcagMgMaatRHNttagaagaatgggaatcHtttttSgtNgYatgtgcYta -atgDctMaaaMccVScgcRgKNaaagtaMtacaKaMNaBatagBttttcttttYatataN -aWcagatttgMtgaaacaBYtHaaatgtaDactatttNatttKttSattgBatSRtgKHt -tacgattgcggtaaaaacHHtaNgMcgHaVDtgtagaagatBaaagDttaacSatttaat -ttaccagatataattggVgaRctRtcgccVatRNtDgcagBcVaHtBaatDttatgKRKa -gataaRgcagtaKgagttatatcaMSagttccRcttaaatgatcttataaacaaatttcc -cttaBaWtagtagttMaacMaKaaaagHatYKactttRatgtctcgattBcSagaDKttt -HtBaccttNttVttVVtggttacgtaaBMRgatcgtctacaaNBtaVggttYaaggattc -caNgRgtagBtgtaBacaagtataaatBaaatKRtaMtKHgatcgYggDSgKRaSttHSt -catgtatatWacacRacHcatYtttaacYatatgtgttNtgcSagDHgataYttNattat -cVattcaaYttggtaRHtWtcgaBacgtttaBaccaBaatgtcgcNagaNtKtaDtgDta -tgDaBtcKgtBgatacNaccDDatttYtKggMtYNtaactgVacattaaHgMttatcgtH -MNtataBtKSgccaVttaBcttattcBaagtgaWtaRtcctDVRatgaattgatatgaWg -ccacDaatKaHtttacatNttaWNWgtacaggctacttBaYaaatatacaaaatttcgNH -gMgttHctcKYcttgMtaacBaDVtaatttacagaRttttttagcKagtKactatMRtgt -DtaattccRcaaKSttagttttBtctatagaKaVttttgcNagtKVccttagRgWaNaKW -ttataDgcgaatgMKatgatRcYtctgVagaccgcgVgactagaWaaHNtcRNRKaatac -tcYaNtSDKtcVVggNgDagtttaaKRgttaDcgtNNgtcaYttggtttYtatgtaaagg -attttagatattKMcatgYaaatcaVactcagagtRYtgtaactatagtBaDtVaWatDa 
-tctataaaSgggtactaYaKKWagaaaaataaattatagRcaaaVataVagatatgtagg -cWagacRWattctgacgcDtaBcYattgtaDggMatgagcgagaggctaaatVtctcagR -agDtSgKNcgtVcStacatttagNtgatSNgatcYVtHattHtBgMacRaattaHBacRc -NaaccctVaaYaattcVccatacKcttSagtctgKMNagRaNcatNgcgHattStSKYRg -gtcagtcaccattttagtMaccctggVattHaatVagaaMaattaVacatacacaaatta -attacgtKtagaaaMgatWgWccatYtagacKatctKatMcggYcgcatgHRtcStVtaa -tHgSaaaVagtgaatgtgYtattaYcRagatgatcataacacSgaYaactMKttatRcga -ataaMSatacNgaMatttcggccacgaYYMcaKattRagDtRtatMVBtaattWtMHgNa -WDgStaaSNggStcBcVYtaYagaHtacagttccMcgtYtYttcattgSWcttagttcgt -HtgVRYgaMacttBtatcaactaaaaVtDgataaDgtatcatYDttaStgccBacctaaB -agttgRtaSBtaaaagWgcacBggttagcMaYatttBgtaggtRBaSagttcacgtaDaY -aaaacDSaKattctgtSatatgtatggVBcctctgtgaaHDKgttaRttttBMHgRMgHa -gtagMgacgaagttaatattgRtHtHttatYaaagcagatgtgattagtggcactactVa -ttagatctctgtttatcattYttgatHcHttagStgatgactctDaaatcagtgttgttt -ttcYaaagtatatcYcaSaacaVttcgWtatKaaaHWtRgtttaKacttctgaaNaYacc -tNtcStatttaaagttKgtgatcctBcaBtctttaaaKagttgDtWctDtgtgctataKa -gtaNHatctagYgatcMgtggYaagactgacacttaRaaccHgttcaYtagWtggtgBcS -tacaMcMHataaaNagatactccaggagttaatcatRttttgaKNgSgcaggtgttRaYc -aaataBtaDtatHgBtatacKaataKtaggaaatatgcataaHgaKttttatMaaaNgMa -tYattgaatNtatVaggtKctttHattcatttatYtattagtataYtttagYcattagaa -ataHtaaccttactcatYttHMRagttctDagNSVgcgVaNNattctVcaaVagaattta -agaggttttacRagtagtaaaBaBaaMtaScKgVaRcNtctgtataagtatVgtDgHaYt -tcHYttaagatRgtgaattctYaaaattRtcWtacDDaRatcaKtaSacaagctaNttRY -agMSDKtWgWaYNgaaaatatNtaatatHMtMWRaRacaaaatgctgctacNKaKtagtt -gVatDaVccatSDtgaSggcgWatccBgaaVtgtaWttagatVaBWtacgWtaYattaaa -tMctDgDaaKatttgaaatgctWctttaHtggHaBBSRVBWtgattgagatccNcaaaHt ->THREE Homo sapiens frequency -gcactagtattgtcgggatcccattaacaggctcaaccacgagctcacgcgaggacatgt -agtccgtatctttaacgaagcgacagcgacagaactcccatggataaccaattataaggc -ccgtaatcctctagacatcgtttaccaataaatccgctttctccgtaatcatgttgaata -ccccagagtagtccagatgataaccgatgaaacacaagtctttctcaatgcacttacggt -gaacttattaccgccaacgtagctcatcaaggttgcgacatctagttgtgtgtttgcgac -gagcccagcgaacttcatcaactttcgtatattcaacgccttgtaattttactttaagac -gcctggtgatgtagattcttagataatcagtttgttatcggctgtactttaccataattt -cacaggtttcaggtcaagaagattatagctgtatatacagttccatgctcggtgcacaga -aacgtgatcggataataatcaatcgcttatgtcgtctttaggcgtatccaatacatgccc -cgataccgcagtgtatttcgacatgtaggtataccgtcgcatttgagctcgagtcaggac -gtcagctagattagattccttaatagaatataccgacctctagtccgaactaaactatag -ataacgccaacttcaggttaattgtctagtcgtctgtttgcagatgggattcttagatga -gtgagtatcggccatattggttcgagcactttagtttttgatgcataggatatgcaatgt -atagctgaaagtactttatctgtttcaaactcacattgattaaaccggtaaacctttaaa -gactacaagaaaatattcagtgagggcaattttgtcaatcacaatcttccagctagagat -acttcacaatttgtcttgaggctacgcaacattagacggattttcgcgttttattgaaat -aatcgaggggcccaagagtatccatagttcattttgtaagatttctttacaggcttatta -cagcttcttcagactcctacatgcttacgagttatatgctagcatgtgaacaatagatta -atatacaggaaaacgtacattgagagagatgaccctacacagcgcaaccgttgagtactt -tcattaaagggtaacgctctcgagacagcatccttaagatggccttattgtcaaatcatt -tgcagaagtacgcaagatccctaaccaacgtagaagaatccctacaaacacatgagacgc -ggtgaaaatagacagggtgttagtattcaatcttcggagtatcaatttcgccaatcttgg -tgagaaagcataccctttcttcagagaaagaagatcaatcataacactatctttaacgag -gtacgcacgcgcatcattacctgcctccatggatctttaggatagcggaaagtattggca -gcgtattgtgatttcgttcctactttatcaatttcacattcatatacatgtcttttatca -aaatcgccaataagataggatgagctatattagatgctagtagagttcgcgccaacatca -tcgataggaatactcaggacagcgtgataggacttttcaatccctaatactctctataat -tataactctctcttaagtttggaggcagtaacgcgctctatataatcagtttgctgcacc -attcttcagcctctgatacatacaaataaattccacagcagtaagagggtttaattgaga -catcttgggaacttaggattttactctaacatcaccgaaacgattattggataccgtacc 
-taaacgaactttctcaaggcagtaatataggacatccgcaataacacaaatgctgcctcc -ccaggagttatgtcttcctggaggctatatcttacacccactcactataggcaaactaaa -gtttaaatgttgattgtctaaaaaaaagatagataagagttggccggcgtagcacatgcg -aaagtgaatcgtaagctataattctctggacttgaagttctgtcctgttcctctgcaaga -aacaaacttcctttaaagctatttacgacgcacatctcagcaagttataaacatgttgga -agtttctagtcggaattcccaaagaacggatctatctaatgcattcctacatttttcctg -tctgccgatggtgccatcctattcaaagaatttcttaaaagtagattaaatgggactttt -aacaatgagtaaccttacgcctctaagggttcctcgagtgccatacaccagtcaggtccg -agccacatacacggagaacattctaacatagcattctcaactcgatcatttgcaggttac -ttctttcctatcctagtgctaaaaatcatacttgcaatcccatagcacggattaagaacc -taagaaacaattcagtaaaacatgttcgaattcttggtatgggaacatcattgcagctat -ggtctaacgcattaatgtttgggtacatcttccatcatataaacaggaagagtctgacga -cagggagtgcttgcgatcatgtctatcattgtgaaatcaaattgtagctcacatgtcgtc -tatgagagcgtgtatccgataagatttagaaaaatagaagtcgtataagatctcactgaa -cttttgaatgaatgtgaagcatatatgatctgctttaataaaactttatccataggatac -gtttccaaatcaattcaataattattagtcaaaatagataaggatgaacaacctgaaggc -cgatcggacgtagaaagtggtcccatcactttgagttgatattgttgaaccacacgttat -tatggttttcaaacagtctcaggatattgtatatacagataatccgataccagttgtctg -acgcccctcttacgtaccccaccctttgtgacgtttaaagcagttgttcagtattttaaa -ctaggcggcaactaatttggaaagaagcacagtggatatgtctaaattcttgttattcag -gcctgaatttaatacaccgcatagttaacttcgcggtagagttgttcatcatgcctcctc -taagctaccacttctatgatacaccaatagttgttctacggaatctgataattggccaag -tcataaacttccgctgcgttcaacccccttgctcgaatatccaactcgaaaagacagcct -tttggtgtccggaacaaatcagttacttcttttctgatgttaattctctgtggtcagata -cagaccaaaaactccgcggatttaccatcctccaagaacaaatttgcatcaacatagcat -tttggctacatattctaagtctcaatagtttaggttttcaactacattatcccaacatta -ggattggaggaataatagctgggtaagtccccttgcgtctacaatcgactattttttatg -aatatgcttctgccgcacctatggttattaaaaaagtcatgactttgaagaaccctgaaa -agatagatgaatcaggtgtaatggcagcagccaaagagcatataattagcaacactctaa -gaacattatagatatgatgatagcgatcgtcatgatgttatccggtcacaatagtagctt -catcagctaattcgttttgccagtggtgacttgcgctggaagaatcgttatacggtccct -tccctcttgatacggtgggggcttattcaaccgcgtggattgggttgtcatacttgcatt -aaacgatgtaaaccatctagtagtcaactatactaaatcacaaaatagtgatcaatacat -acccgcttcatggttttaaccatttaattgattaaagatattccgctaagaaccattatc -tacctaaactgatcgccgtatcctagtagtttgaaatttgatgtaccgtaatgatcaacg -aagtaaaacgttatattgtatgtagaataataggtcttggagctaaatgatgtgattggt -agtgaagacttacccttacaactttaccggtttctcggaagaatatactagagaatcaat -gcatgggctacataagcactttagtctaatgagataaaaaatacacgagtcttccatcat -gaattttttgtcgaaaaactcgaacctggtaatttaaaccatatatctttatgtcgtcaa -taactctcatatgttttatataacttcccaatcacgacttgtaactgcttgttcgactga -gctgtttgagctatgaggccgggatccggttgagctacatctatttgctacaagaaaaat -gaaagcacatttgttgggagttctggctacactcatagagaaataagtggcccgagtggg -tgcggcctgcctccatattcaagtgtatcttaaaccaagtggttccaacgctcgcgctaa -agaattaaagcctttatttcctccacggagtagcccgtaatccggttcgaaagagaccat -tgaagttaattttcatatccagtgaagtttaggcacaagcatgtgttctgccacatgcct -caaagcgctcttcaaccaagatatgattcatcctaacttcgatgaatgcgtctgtaacat -aaatatagaaggaatgattcggcgagttaattttcgccttctccaacatggcatccctac -gttcgttataaggaccatacatgtaggttttaaaggtttgcggttaatcgatatttacat -catagaaattctatagtcaaatttacaagactctagatactcactcgttgcagccggcta -ggaagcgctttgtaccttacttcccttttcgttgcgtaatatgaatttcatatagtaagt -tcaaggcactcatacctccgtgaagagggtagatagactattaaagttgtttaatagtac -gtattgatggaaatgacccgtaggagatttaccactcaatccacaagattcgctgctgtg -cattatcaaaacagtgcatgtcgaaacatgggttgggtccttcaaacacgaatccaggta -gagatacctttgcaatttttcgatgaaggcgaccgagataaatgagctataacactgtat -gtcttttgattgctataaaacacagaaacggatattaatttaggccgtaaccaacatctg -ttatttgacatagaacagatggtcctttacagcgtattccggccttaatattgaggtcca -gtgtattgtcctcctttaaagaagttgattgtaactgacttaaataagacatgtcaccca 
-ttcactgggttgcaactgctggccctttttgtccatcgcacgctaatgtgataacagtac -cgccctcacacctgcgtttaaaagacataaatgtcgctatgaaggttattcattaatttt -agctgttttcttagaaaaggtaaatttaaaattgaaaaggctagaaaactaaagttacga -caaatgtgtttgtcaagtaggcgggcatcattgagattgtaagaaataaagccataacca -gccccggaatagaaaatgttaaggaaaggcgatcttctttgaattcttattgtcaagtgc -agtcatacgttcttatcagaggacattgcaataaaatctaacaccctcccttgtgtggtt -gggccatttgtacttcgaagcgtccaccatgtgcagaggataacggaatgtggttccgtc -ccataaacgatcattctcgcccacttagtggcgcggtaaatcgctctcatagaggtaact -ggcctgtaatgtccaatgttaggctaccttctccaactttagtacaacgaataatgtccg -attaacaaggagtcaatttgtcatgaccagttcattcaggtacttgtatctatacggacg -cgttccagagtagtatttgaaattttgaggttctactgataagtttagctatcgctgtat -gtctgaataagaatttaatgtttatcttcgataaataacaattaacaactcctaggtgat -acactgtgaagtctgctgttccccaaattacatatgctattttgttcacataccatgaag -ttaagctaagtgctctataatggcataaacggttatcaaactagctcgaatttcttttat -tacgccggaagcggattaactgctgtagatcaaacacgttaggatagtgtcgttttcata -tatatctaaattcggtctaacatgcattacccatgcttgataacgtagcacttcgcagtc -taattatgtaatgtccgtttaaccaaactttaaaaaagtttatctatcaccagtgatgcc -tcatcgtgactacccggatctttagcctttagggtctaaacagaactaatattactacgt -gtcatactccggagcttaaccaggtgaaacttatttgttaaccaaatttagtgacagggt -agaaatacgtatcaaattaacccagcaatacaataagcatgaaaataattgtaatcgggt -ttgggccggaatcccgcttggcgaaaacttaatgacatagtgtgatgcattttgcactgg -attgagccacaaactcaactagcattatgctcaatatttggccagtgttctacggtttga -aatttataaaggccgcgcaaaagtcttgtagttacaaacgcataaatctcgaacgtaata -ggtttaattagaacatccgtaggatttctgtttatagtagatttatactaaatgttctga -ttagattctgacggccttacccatacaattaataaagacgaatatattagttatagttta -ctatccaaataaattaagcgaatcgaaataaactgtcacgatactgggcagttatcaact -tatcacttatacagttcggacactctatattggtctgtgagtactctatcaaactaactc -ataagttaactgcgcttccattaaatttcaatacgttcttgtgctgtgtacaaacctata -atcgaataaatgacacatatggagatgcataataaaaaaaacggctccatatttctcgtt -aatcgggcattcttaaggaggagcatctcaccaaaaataacgttcctgataagtcttaac -tattagaccgtcttcgggaatgaacgaaacctcaagctagcatggtatagttcttgatag -cgcgtaaattctgataatactgggcggacagctggaaattagttgccagtgcacctacgc -aaatagtttacataaatcaacgggctccgaacgtaaatacaaagggttagttacatcgca -acaagatgaaaagcatgtatgtctaccgtcatccgtccctaacaaacataaaggtggtga -agaatctcgtaggtcaactataactccatccttgaagcaactactccgcgtccgtgtgcg -tagttcgcaacgagaacactactgaaaaaagctaaacaactctcggtacaaatgcggctt -gtgtcgataaagttggtggtagtgcacggaataacataacaaggaatattatttattcaa -attttttgtgactgttatttgttttctgcctagaatgtaaggatgtgttttttgtgacct -gatagttacgcttatttcaggtccacggtgcgtgagagtgtgtcctataacggcagggga -gcgaagtagtgtcctttagactattcaaggtagaattttgataacgctctataaaaggta -gaaaatcatgattgagcaataagaccccaacttatcaaaaaaggagttctcgacagcgcg -tgtacagtccctataacggctgtatttcctgtgtcacaacaccctcccatcgcactcaaa -atgtagatttatgatcagacgctaacttgttcttagagaaaaatacacgggatactctgt -gcaacgatttcattaataaggtgcagcttgggacttttttggccgtaggctttattaaca -ttcacagtaggtagcgagacttcctatgaaccaatcatgccacgcgttttaacgtttcaa -atataagctaggaagcgtttgccaggacttctataatgcaccgttttttttagtacttcc -ttactagccttagtttatgttagagtctttccaattacaaaggattgaatagccaaaatt -tctacaattctcagcgaacgccagcttaatctaaacacgagcttcaaatattctacatat -cggcaggagtcaatatataaatatgaaaatcgtaccatcctcgtacttttagaccaaacg -tcttcggataattaaatcctttttcaattaccacagtacgtgcattagaactactgctat -gaaagtaaaccttgaaatatagtcctcaagagcgtatccaagtacattgcacgtgtatac -agtcgtataaacgagttgatgttctgacgctagagcttaccattcgttaaacagataact -aaaatttaatggctgagtgacttagtgttttcgacaaacgtcgcggatgtagactattgt -ttataagcaatttttaaaaacatatgttcaaaacggtatgggatatgtcgaattccacag -gggtttatgtaccatagaagtatgtataaggtactaaaggtttaaatctgtgatattcgg -ttcggtgaaaatcagactagtcacacttagtgtctgtaaattagattgggtgaaggtaag -cgatcccgaactctacaaggcatgggatgagattctaccgactccggataacactttacg 
-atcgcgcataactctagctcttagataagtttaacttgtcgatctcataaacagttcaaa -atctgcgatttattgtatcaaatccatcctctatcttctataatcatctgaaccgcgata -cggcactatgagccaagtgaagattgaatccaagaaagctataattggtttattttagtc -catttaaattaagtccggtataagtgctctgtacaatatgcagtctcatgggcatatacg -ttaactaccttttgatacttcgaattggtaaaatatcgactatcgatttgcagtaaaagg -tgtagagtccaattactctttcctgttacatacgatctcttagtttggacaactagccca -tgatggcgctcctctagcgcatgaacctactttataattacatctttatcgatgaatttt -tttagactgcggaggccttgagttttaacagggctgctaaatttcttaagcgattagacg -gtagcgtcgtacgctacttgcttggaacaggcaccgaaaatattgatctactattgcgtc -aactctattctgctaatagcgatggcaaatcacagaagccctcttagtgacaatagttgt -caactatatctaagtcgacctttactgtatcaacgatcacggagagaattaccgaatacg -aaacctcaggactaaaaaacggaaaggatttgtcgacggtaaatataatacttgttaagg -gtagcgacacaggtatactttgggtgtaaacgtggtgcttcccggaacgattttcagacc -agaaaattgttccggtaaccaggaaatctcgtctgcgttaattcgtgttagtaaacttga -tcttcagactccttcttttcgttgcagcgagacttaaattatatctgcgaaatagtgccc -cgtgcatacttcagatggtaggagataccatttggcccattgtgactttacgcgattaat -taaccgacatacatctgttcctgagctatgatcgtctgaataaattacggtctcctcttg -atacctaatggtttctggagacgtttctcatgttcaaatggatagcaggagatcgcttca -tcaagtttagctacgcagagcatcaaaatatgtatgggaaagtcgatttccaaaccagaa -gggataaagagaaataacggacttctccgtagattagcctgatattttgatgggaatcat -ggcggcacatacgtaagagttgcgtgaacgaatattttggacggcgggagacacatatcg -gccattcgttaaggtctctatattggacatcacaagcttagcagtatgagctactaacac -tcaagacattattgattttttcaagatatgtttcattcctctaccgctattcccatacgt -tcgattcgccgggtgagcgaaaccacgggactgaggttaagctaatcaataacaactcgt -tgcgatagagacctatgtatactagagagaattccccaacatttttacaaaaacaaagca -gactaaaatagatacagtccctccatacaattaggaccaacatgttattgccgatcctag -cacacacaccacaaactcagaacttctgtcttacctatgaaagggtctgcacttctgatt -gtacgtgtctaattagcattaatattaaaactaattaggataaactataggtacgagctt -tactataagtcactaggtgttttccgatcgaaaaacgggaccttcaagccttggtaagta -catttaggataaagaaaaaaaggaaggtacgtgactaatctgtctaaactgacaatagag -tagtacctacatgcttcatgtcaagtcttaatacgcaagcgctctcgttatactgctcaa -caaaactcataaagttggactccatcatttagaatcatagggaccaaaacatttatttgc -tactgtcactttgtaggtgttctattctgaattcctcatattgatacatgaatcggaata -cctgtggatcccttaggacgcacgtgctttctttacgtcagaatacatattgtcagaatc -gagaagttccatgcaattaagaattcgcctctttgaaaactcatatccccacatataggg -tccaccgttattcggaaacgatataataattattccagcgttgagcgtcccttaagagcg -cattttcgcttggcctttcttctacgactctacaacgcaagtggctgtgtggagtttacc -acagcgcagcaccccatagaactacctctgagagcgcgagatggtggcagtatgctctgc -agctagcgtttagaacgcgcccgcgcattaaccagtcatattaaaatggactgtcttaat -tgtcggcattaggagcaatattaactgatgagggtatcggtcgcagaagtaatgacggaa -atacgcctctagtccgcagagatacgattacagactcagatcccctaacaagcaaaacga -ttaaatcggaatcactccccctatgacatatttgaaatacacaagaaaccacgcaacatg -tcccgcattctcaaccgcgctttataagatgttgagtctgagaattagatgacctaactg -caagaatcatggcgagtttatctagtaggcaagtctgtaccctagggttcgaacgctgtg -acgtcgtgatcggtctaaggacttagatgataaccaagaactggtttaccgagtactttc -actattaggagtaattacatgcgttcaccgcggaatacgacgaaattttttcatatcttt -atgagcgagatcgtgtcgtctttgcattgcaacagtcgctaccagtaattgctgatcaat -tatagattcattatacagatgcttacttttctctattcaatactgtcatgagttgttttt -aaataagcaccagaattatgtcgcctagtataatcttgcttccacttgaatcaatgcgat -ggacagtattctactgattgcaaagtagtctatttcggcttagcagtacgcatgcctatt -tttttgcaggcacagaataatatgcaactaggattctcggcatccaattaacaggctaaa -acaccaccgaaagacaggtaatctacgaagttgatgtttactacagaaagcgaatgatat -cacttggagaacattttagatgcccccttttaatctagactgagtgtaccaatatatcac -cggtctaccgaatcagcttgaataaaccactctagtactcatgataaccgagcatacaca -tgtatttctcaatgcactgaaggtgaactgtttacaccataccttgcgaatcaacgtggc -gacttatacttctgtctttgagtacagcacaccctaatgaatctaagttagttgttgata -cgaattgtaatttgactggatctcgcctcctcatctagattcttagagaagatgtttctt 
-atagccggtactgtaactttattgatctggtttatggtaatcaacattttacctctattt -aaacgtccttgcgccgtgcactcaatcctgatcggtttagattcaagcgattatcgagtc -tggaggccgagaaaagaaatgcacagagtaagctctctattgcgacatctacgtagaaac -tcgcatttcagatcgagtaagcaactctcattgtgttgattcagtaatacaagattacct -acgcttctacgaaatatactatagatttagcctacgtcacctttagtgtcgagtcggagc -tttgaagatcggatgcggtgtggactgtgtataggcaattttgctgcgagctcgtgactt -ttggttgatgtcgatatcaatgggatacctcaaacgtctttatctctggataactcacat -tgagtataccggtaaaaatttattctattcatctaaatagtcagtgagggctagggtcgc -aatcacattaggccacatacacatacttaacatgttctattgacccgacccaactttagt -agcattgtagccgtttatgcaaatatgccaggcgccaaacactagccagagggcattttg -ttacatttatttaatcgattattacacagtcggaacacgcctacatgcgttcgacttatt -tgcgacatggtcaacaattcagtaatttaatccaaaacctaaagtcagagacatgacact -aaaatcacattaaggtcagttagtgaaggaatggctaaccagctagagaatgcatcatta -acaggcacttattgtcaaatattttccagatctaagcaacatcacgttaaaaagtacaac -aatcacttaaaacacatcagtccaggtgtaattagaaagccgcttagtaggcaagcgtag -gagtataaatgtagacaatagtcgggacttagcagacactggatgcagtcatagaagatc -ttgcataacacgttagggttagagctacgaacgcccatcattaactgcctaaagcgtgcg -tgagcttagcgctaacttttccaacacgtttgtgatttcgttcataatgtatcaatttca -cagtcatatacagggagtgtagaaaaatcgcaattaacatacgttgacctatttttgttc -agagttcagttagagcctaatgattcgagagcaataatcaggacagcctcataggaagtg -tcaatcacttagaagctatattattataaatcgctctttactgtcgtcgaaggaacgagc -gagagagaatcagttgcctgcaactggcttaacaatatgatacataaaaatattttcatc -accactaagacggtggaattcagacttattggcaacttaggatgggactattaaataacc -cataagatgttgggataaagttacgaaacgaaagggatatagcctgttagataggaaatc -cccaataaaacatagccggcctccacagcagtgatctattccgccacgcgatatctttat -accacgcaatataccaataataaaggttaaatgtggttgcgatttaaaaaagatacatat -cagttgcaccgcgtagcaaatccgtatgtgaagcgtaacctagaattatagcgtctgcaa -gttctctaagcttcctctgcaagatacaatatgactttttagcttttttactaccaaatc -tcagaatcttagaaacaggttggtacgtgctactcggaattcccaaagtaccctgctata -tatgccattccttcattggtccgggctcaccatggggccatcatagtaatagaaggtagt -aaaactagttgatttccgacttttaacaatcactatcctgacccagatatgggttccgac -tggcccttactccagtaagggcagacacacagacaacgagaacttgataactttgaattc -tcaaatcgatcattgcaacgtgacttatttactagcctactcctataattcatacgtcaa -atacatttcaacggaggaagataataagtaaatattcactaaataatggtcgaaggagtc -ctttgccaacataagtccacatatgcgctatagattttttcttggggttcatattcaata -agataaacagcaagagtatcacgtcagcgagtcattgagatcttggctagcattgtgata -gcatattctacctaaatggtagtctagcacagagtggataagatatcagttagatataga -caagtactataacagatctcgcttcgttggattgtatggctagctttgatgatatgattt -tataaaaattgatccagacctgacctggccaattatattcattttttatgagtaaaaata -gataaccatgaaaatactcaagccccttaggacgtacaaagtggtaacataaatttcagg -tgttattctgcaaccacacctgttttgggttttcaaaaaggctaagcagattggttttac -agataatccctgaacactggtatctcccaacgatgtcgttcccaacccttgctgaccttt -taagctctgctgaagttttgtaaactaggcggaaaatatgttcgatagatccactcgcct -gaggtagaaattcgtcttagtaacgcctctttggattacacagaatagtgtactgacacg -tacactgctgcagcagccatacgctaacattaaaattcgttgagtctacatttgttgtta -ttcggattatgttattgggaatagtatttttattcccctgcgtgaaaccacatggataga -ttagcctactcctaaagactcccttttggtctacggttcaattctcttactgagtttatg -ttcgtaattatatcggcgcagtgaatctcctaattatcaccggagttaccagacgccatg -aacttatggccagaaacattgcatgtggcctacataggattagtatcaagagtttacgtt -tgcaacgacatttgaccaacttgaccattcctgcttgtagaccgcgggaactcccctgca -cgcgactatagaagttggtggtggatgtggcttatgccgcaaatatggttttgaaaaaag -taatctattgcttgatacctgaattgagacatgtataagggctattgccagatgaaaaac -tgcatataaggtcaaacaatataagaacattatacataggatcttagcgttcctcaggat -ggtatacgctataaagtctagcttcagcagctaaggagttttgccagtgcggacttccgc -tggaagattaggtttaaccgccctgacatcttcataaggtcgggcctgattcaaacccct -ggagtgccgtctcatacttgaattaatcgatggaaaacttcttctagtctaatattatta -ttaacaaatgacggttcaataaataacaccgtaagggtgggaaactgttaagtgatgaat 
-cattttaacctatcatccattagctacagataatgataccccgatccgactagggggtaa -gtggttgttccgttaggataaaccatgtaaaacgttagagggtttgtagattaattggta -ttccagataaatgaggtcagggcgagtgatcaattacactgaaaaattgtcagcttgcgc -ggtagttgttaagacagtataaatgaaggggattcagaagcaagtttctcgattgactga -atttataaaccagtcgtcaatcatgatttttgtgtcgattaaagcctaaatggtaattta -aaccattgatatttatcgagtctataaatatctttggttgtatattacttcacaatcacc -aattctaaatgattcttccactgtgcgggtggagatatcaggacgggttaaggttgacct -acatcgttttgatacaacaaaaatcaaagcacatggctggggacttctcgatactatctt -tgagatagtacgggcaagagtgggtgacgcctccctacattttcaagtctatcggataac -ttctcggtaaaacgctcgcgatatagttttaaagcattgatttaatccacgcaggagcaa -gttttaccggtcgaatgagaaaattcaacgtaagtgtcatatccagtcatggttagccaa -aagcatgggttatccaaaaggaataaaacagctcttcaacaaagagatgaggcttcataa -cttcgatgaatgcgtatggttctgatatatagatcgatgcatgaggacactttattttag -ccggcgaattaatggaatccatacgttacttatttggacatgacttctaggtgtttttgc -tgtcccgtttagcgatatttacagattagtatttcgtttctcatagttaattgtatctag -atactaactcgttgaagacgcataccttgccatttgtacaggacttaactgttccgtgcg -taatttgaatttcttataggttcttcaaggcacgaatacctcactcatgaccgttcatac -tctagttaaggtcgggaatactacgtatgcagggaattgtaacctaggagatttacaact -ctttaaacaagagtcgctgaggtccaggatcaaaacactgaatctcctaacttcgggtgc -ctccgtaaatcacctagaaacctactcatacatttgcaattttgagatgtaggcgaaaga -gagaaatctgctttttaacggtatctcttgggattccttttaaaaacacataacgatagt -aatgtaccaagtaaccaaaagctgggatgtgtctgtgtactgatccgccgtgtcagagta -gtccgccatgaatattgacgtcaaggctagtgtcatcaggtattgatgttcattgtaaat -gaaggaatgaactaatgtcaccaagtaaagggggtgaaaatgctccccagggttctacag -acatagagatggtcagaacacgacccccctctcaacgcagtgtatttgaaatatatggac -atatctaccttattctgtaattttagatgtgttctgtgtataccgatattgataagtcaa -taggcttgattacgtatcttaagacaaatctgtttcgcaagtaggaccgcatctttcaga -ttgtttctttttatgccataacctgcccaggaattcaaaaggttatcgatacccgatatg -ctgtgaattattattctaatggccactcattcctgcttatatctggaattggcatgaata -tcttacaacctaaagtctggcgttgcgccagttctacttcgtaccggacaccatctccag -tcgttaaccgaaggtgggtacgtcacataaaagttcattagaccacactttgtgccgacg -tatatagatatattacacgtatagggaatgttttctcctaggtgacccgaccttctacta -aggttgtacatcgtataatggcccattaactacgaggaaagtggtattgacctggtaatg -cacgttcttcgatatataccgacgaggtaaagtctactattgcaaagtttgacgttatac -tgataagtttagatttccctggatcgcgcatgaacaatgtatgcgttatctgccatatat -aacatgttacaaatccttggggatactatcgctactatcatcggaccaaaattaaatagg -ctagtgtcttatcagaacatcatgtttaccgaactgatctattttccaatttaagctgat -attacgtccgcgtatttattttagttccccggatgacgattatctgagctacatcataca -agttagcatactcgccggtgcattgatttcttatttcgctatatcttcaagttcacaggc -ttcatatagttccaattagcagtataattaggttttgtaactttaaccatactttataaa -aggttatattgcacaactgatcaagcatccgctataacccgagctttaccagttagcggc -taataacaaataagatgacttcgtgtcatacgaccgtcatgatcatgctctaacttaggt -gggaaccaaatttaggcaatgggtagtaataagtataaaatgataccacatatactataa -caatgaaattatttgtaatccggtttgccaacgtatcccccttcgcgataaattaatgac -atagggtcatccatgtgccaatcgtgtgtgccaaaatctcaaattcaattatcatcaata -ttggccaagtgttataagcgttgaaagtgatataggccgccaaaaagtagtctacttaaa -aaccaatatttatcgttcgttattgctggtagtacaacatcacgagcatttctcttttga -gttgatttatactatatctgctgatgtgattatgtcccacttacccagaatattaagaaa -gtcctagattgtaggtatacttgactataaatataatttaagactatacaaataatctgg -ctacattatgccatcgtagaaactgataacgtagtaacgtcggacactagattttggtcg -gggagtaatctagcatactaacgaatttgttaaatccgctgaaagtatatgtcattacct -gcttggcctgtcttcaatacgtttagactattaaggactcatttcgagatccagtattaa -ttatacgcatccatatttatactgaagacggattgagttaggacgacaagctaaacaaat -attaagttaaggattagtattatattgtagaaactcgtcgggttggaacgattcatcatc -atagaatgcgttacttattcagagagacttaattcggttatgactggcagctcacctgga -aagtaggtgaaaggcaacagaagaatattgttgactgaattctacgggctacgaacgtaa -ttacaaagcggttcgtaaagagcataaagatcaatacaatggatctctacagtattacgt 
-aaataacatacataaacctggtgttgattcgactagctcatagattaatcattaattgaa -gctacgaagacgcggaagtctgcggagtgagcaaacagtaatcgactgataaatgcttat -aatatcgcgcttaaatgccgcatggtgtacattaacgtgggggtagtcaaaggaatatat -ttactaggaatattagttatgcaaatgttgtgtcaatgtgatgtgttttatccagacatt -ggatgcatggctgtgggggcacaggatacttaccattagttcacctacaagcggcgtgag -agggtctcagttttagccagcgcagagaagtacgggcctttagacgattaatgctagaat -tgtcataaacctcgtgaaaagctagttaataatcatggtgctagaagaacacaacttttc -tataaaccagttctcgactgacagtcgtaactcactatatcgccgctttgtactgtcgca -aaaaaacctcacatagaaagaaaaatctactgggtgcatcagtagagatcgtgttctgag -agataaatacaccggatacgatctgcatcgagttcatgtattaggtcaagcttgggactg -ttgtgccagtagcattttttaacagtcaaagtagggtgagacacgtcatatcataatata -tgccatcgaggtttaaagtttatatgataagctagcatgcgttgcaatcgtattcttgaa -tgctccgtggtttgtactaattcctttatagactgagtgtatcgtacactcggtacaatt -acaaaggatggaagagcaaataggtcttcaattataacagtaccccaccttaatctaaaa -accagcttcaattagtattaatttcgccaggagtatatatataaatatctaaagactaaa -agactcgtacttttacaacttacgtcgtagcataattaaatcatgggtaaatgtcatcag -taagtgcattagaaatactcctttgtaaggatacagtgaatgtgtctcagcaagtcagta -gaaatggaaattcatactcgattaaggcctataaaactgttgttggtatctacagagtga -ttaaaattagtgaatcagattacgaaaatgttttcccgctcgcacttacgcgtttagaca -aaagtacaggtggtacaattggctgtagtagaattttggtataaaataggtgataaaccg -gatgggtgtgggcgaattcaaaagcggtttttgttccatagaactatgtagttggttata -aaggttgtaatctcggagattaggttagggcttaatcagaatagtaacaatttctctatg -taaagtacagtgggtgatcgtatgagttcacgaactcttaatgccatgcctggacaggat -aaacaatacgcatataacttgacgatcgagcttatatcgacctatttgagaagtttaacg -ggtcgatataatatacaggtcttaatagccgattttttctagaaaagcaatcctatatct -tagttaatcagatcaaccccgtgaacgatatatcagcaaactgaacattgtatacaacat -tcctttttgtccgggtggggactccatttaaagtatctcacctagaactcagcggtaata -gatgcagtctcttgcccagttacggtaactaaatgttgatacttagaattgctaaatttt -agtctagacatttccaggtaaaccggtagacgacaatttctctgtcatctgtcataagat -cgcttagtgtgctcaaattgcaattgagggccctactatagacaatcatcagacttttta -attaaatagttttccatgaatgtgttgtcaaggcggaccccttcacttttatcacggctc -ataaatgtcgtatgactgtagtcggtagcggccttcgagtcttcaggggaaatggaaaag -aaattaggcttctaagatggactataatcgattaggctaattccgttcgcaaatcacaga -agcaatcttactcaaaattgttggaatcgatagcgaacgcgaccgtgaatgtttaaaagt -gctcgcacagaattacccaatacctatcatcacgacttaaatacccaaagcagttgtagt -cgcgtaatagattaagtctgaagcctagagacaaagggatactgggcggggaaacctgct -ccttcacggtaacatggtaacaacagaatttggttaaggttaaaacgaaatatactcgga -gtgaattactgttaggtttcgtcatcggatcaataagtagtttccgtgaagacactctta -tattagatctccgaaattctgacccgtgcattaggcacttggtaggagattccatttgga -acttgctcaatgtaagccagtaatgttccgaaataattcgctgcaggagcgaggagccgc -tgaataaaggaccctcgcatcttgttaccttatggttgagggtcaccgttctctgcgtca -attccagagctggagatacattcatcaacgttacctacgcacagaataaaaagatcgagc -gctaactcgttttcctaaacacaacggatttagacaaattaccgaatgcgccggagagta -gcatcttagtgtcatgcctatcatggcggctcagtacgaagagttcaggcatcgaatatt -gtggtagcccgcactcaaagttccgccattaggtaagctatatattgtggtcagaacttg -aggacaactatgagctactaaaaataaacaattttgtcatttgttctagatatgtggcat -tcatcgaacgcttgtaccagaagttacattcgcagcgtgagcgaataaacccgaatgagc -gtaacattatcaataacatatagttcagatagagaacgaggtattcgacagagaattacc -caacattggttattaatctatgcagaataatttagataatgtcactacataatattagga -ccaaaaggtgattccccagaagacaaaacaataaacaatctcacatattcgctagtacct -atgtatgggtatgatcttctgattggacggggataatttccaggtatattaaaacttatt -accataatctagacctaagagaggttatataagtaaagagctgtgttccgatagaaaaac -ccgaccttaaagacttgcgaagtaaattttgctttaacaaaaaaacctacgtaagggaat -attctgtataaactgaaaagtcaggtgtaactacatgagtcatgtcttcgattaattaca -atgcgatctcgttattctgatcaactaatatcataaactgccactacatcttgtacaatc -attcgcaacaatacttttatgtgctaaggtcacgtgcttcctctgctatgctgatttaat -cagattcataaaggaatacgaataactctggatccattaccacgcaagggatttatttac 
-ggctgattactttttggctgttgacagaactgccatgaaagtaagatgtcgcatcttgca -taaataatagcacctaatatagccgacaaagtgattccgataacagattttaagttgtcc -agccttgagactccatgaagaccgcttgggagcttccccgtgattagaagaatctaaatc -ccaagtggatggggggagtttaaatctcagcaccaacaaatagtacttcctctcagagcg -cgtcatggtcgaaggagcctatcctgatagaggtttgaaagcgcacgcgcatttaactgt -catattaaattggaatctcgtaagtgtcggcagtacgacaaattttaactgatgtcggta -tacggagaagaaggaagcacgcattgaagcagctacgcagaactgagaagatgacactct -aagatacaattaatacaaaaacgttttaagcccaatctatcaacagatgtaagatgtcta -atacacaagaataaaaccttcatgtcccgatgtataataacagctttatttctgctggtc -gaggtgaagtagtggaaattactccatcttgctgcgcgtctttatagtgttggctactct -gtaaccgacgcgtccatccctctctcctagtgatccgtatatccaattagaggataacca -acatctgcgttaccgacgaatttaaatttttcgactatttaattccgttcaaacccgtat -tcgtagtaagtgtttcatagatttatgaccgacatcgtgtacgagttgcagtgcatatgt -agataccactaattgctgatctaggatacatgctttataaacatgcttacttggctattt -tatttactgtcatgtgggggtttttattttcaacaagtatgtgctaccattggataatct -ggcttcaaattgaagatatgcgttccaaacttgtctactgtttgctaagtaggagttgtc -ccattagaactacgcagcacgtggtttgtgatcgaaaagaataattggcaaatacgaggc -tagccttcaaatttaatgcagattactcctcagaaacacacgtaagcgacgaacgtgatg -tttactacacaatgcgtatcatagaaattcgtgataatttttgttccaacctttgaatct -agactgagtggaaaaagatttcaccgggataccgtttatgctggttttaaaaactcgtcg -aatcatcttataactgcattcaaatggatttctcaatcatctgtacgtcaactgttttaa -caataacgtcagaataaaccggcacaatgagacggcggtctttcactacaccacaccctt -aggattataagtgacgtgtggattcgaattctaaggtgacgggatctacaagcctcagct -acattaggtctgaagatctttcgtatagccgcgtatgttactgtttggatatgggttatg -ctaatcaacagttgacagcgagtgaaacggccttgcgacctgaaatctttacggttacct -tttgattcaagacaggatcgacgatggaccacgtgaaatgaattcaaaactgtaacatcg -cttgtgcctcagcgaccgagtaacgacaagttcacatcctctatgcaactatcattgtgg -tcattaaggtattcaagattaactaagagtcgaccatatattctagagttttacaattag -gaaccgttagtctagactaggagcgtgcaacatcgcaggaggtgtggactgtcttgaccc -aagttgcctgacacatagtgtcttttgcttcatgtccttagcaatgcgatacctcaatcg -tagttttatcgggataaataacatggtgtttaaccctattaatggtttctattaatctaa -attgtaaggcagcccttgggtcgaaagcacattaggccacatacacagtatgaaattgtt -cgagtgtccagaccataattgactaccatggtacacggtgttgctattatgactcccgca -aaactcttgacagagggaattttggtacattgatgtaatcgatgatttaacagtaggaac -tagacgtcatccgttagactgagttccgacatgctcaaattgtcaggatttttatccaat -aactaatggctctcacatgtaaataaaatcacattaacgtcacttagtgatggattcgct -aaacagatagactatcattcatgaactggcactgtttcgattatatttgcaacatcgaac -atacttaaagttaaatacgacatcattcaattaaaaaaattcagtacacctctaatgagt -atcccgctttggaggaaagagtagcactttaaatggacaatttaggccggactttcctgt -aaatggatgaagtcattgtacagcttgaataaatcgttagggttagtccttacatccacc -atatgttaatgaataaagcctgagggaccttagagctaacttgtccaacacgttgctcat -ttacttaataaggttgaaatgtatcagtaagtgacagcgagtgtagattttgaccattta -actgaccttcacagttttgtcttcagacgtcacttacaccataatgatgacagagcttgt -agatgcacacactcattcctagtgtaaatcaagtagtagctagattattataaagagata -ttttctggcgtcgaacgtaacacagagagagtataaggggcatgataatggcttatcaat -atgtgtaagaaaaagtttttaatatcatctaactcggtggaatgcacacttatggccaac -tgaccttgggacgagttaagataccataagaggttgcctgtaagttaagataacaaaggg -atattccatctttgtgtgctaagaacctatttatatttgcagccataaaaacctctgtgc -tatgcagccaccagagttatttatacaaagaaagagaccatttagatacgttaattctgc -ttgcgatttattaaacagacatttcacgtccaaccactacaaaagccctatcgcaagacg -atcattgtattatagcctatgcaacgtagctaagcggccgaggaatcataaaatatgaat -tgttacattgtttactacatatgatcacaatctttgtaaaaggttcgttcgtgatactac -catgtacctaactaacctgagatatatgcaatgacttatggggtcagcgcgcaacatccg -caaagcatagtaatacaaggtaggaaaacttctggatttcccaaggttataatgctctat -actgaccaagagatccgttacgactcgcaatgaatactctaagggcactcacaaagaaaa -ccactaattgataaatttcaatgataatatcctgaattgcatcgtgtatgagttacgaga -agtcgcatttaatgaattagtcatagaaatgtcatagcaggaacataattactatatttt 
-aacgatttaatcgtagttggagtcctttcccaaattatgtcatcagttccgatttagatg -ttttcgggcccttcttagtaaagaagttaatatccaagactagctcctcacccacgcatg -cacatattcgcgagaagtctgatagaatattcgacagaaatgcgactctagctcacactc -gttaactgatcaggtacttatagacaagtacgttatcagatatcgcttcggggcattgtt -gcgctacctttgtgcatagcagttttgaaaaattgttcaagacctgaacgggaaaatgat -attatttttttaggaggaataatacagtaccatgtaaatactcaaccaccttacgtactt -cttacgccgaacatatatggcacgtgttattcggctaacaaaactgttgtgccttttcta -taaggataagcagattcgttttaaacatatgacctgtaaactgggatctacaaaagaggt -acttaaaataaattgcgcaacggtttagatctgcggatctttggttaaagagcaccatta -gatgtgccatacttcctatcgcctgagcgagaatttagtctgaggaaccactcttgggat -ttaaaacaattcggttaggacacctactcggcggatgaagcaatacgataacattaaaag -tcgttcagtctaattttggtcgtagtacgatgagctgatggccaattgtatttttattaa -cagcactgaaacaaaatggagactttagactaatactaaagtctcaatgttcgtcgaacc -ttaaatgctcggaatgaggggatcttcggaagtatagcgccgaagtgtatctcattatta -taacaccagtgtacagacgacatctaattatggccagaaactgtcattgtgccattaaga -ggattagtagatagtctggaccgtggaatagaattttgaccaaattgaccagtcctgctt -gtagacagcgcgatctaaactgcacgagaatatacaagttggtggtgcttgtggctgagc -acgctaagatgcgtttgtttttacgattctagtgcttcttaacgcaattcagtcttctag -atccgctattccaacatcaatatctcaatttaaggtcaatatatataacaaaattagaca -gagcagctgacacttacgaagcatcgtagaaccgatatagtcgaccttatgatgatatgg -acgtgtccaagtccgcacttccgatgcatcttgacggtgaaccgaaatgaaatcttcatt -agggcccccatgtgtcaaaccactcgagtcccgtctctgaagtcaagtattactgcgaaa -aattcgtctactattagtttattatgaacttatgacgcttaaataaattaaacagtaagc -ctgggaaaatgttaaggcaggaatctttgtaacagttcataatgttgctaaagattatca -gaccccgtgaagacttcgggctttgggcttcgtaccgtagcataatacatctatatagtt -agaggcttgcgtgttgttgtgctattccacatatagcagctctgggcgactcttcaatga -aaatgaaaaatggtaacctggcgacctacttgttaagtcagtttaattcaaggggattaa -gtaccaagggtcgagtttctctgtatttattatactgtaggcaagaagcttttttggcga -gatttaagacttaagcctatggtaaaaatttgatagtgagcgactatagtaagagatttg -ggtggttagtaattaaaattctcctatgctaaatcaggcgtacaatctgagggtgcacat -ttctcgacgcgtgaaccttcaccgaaagcgtgtggattatacaaatttcaaacatattgg -cggggcacttatccataatagatttctgtttgtacgccaaactctgcctcacccctccat -aaattgtattggctagaggttaaattctccgtaaatagagacacatatagttttatacaa -ttgtttgaatcaaagcacgagaaacttttaaccgtacattgacaaatgtcttcggatggg -gcagagcatctcttcgtgacccaaatcaatcgctgagcaataagcaagaaaacacagatt -atacaaagagatctggatgaagatattcgtgcaatcactatcgttatgttagagagttcc -atgcatgaggactcgttttttgaccaggagaattaagccaagaaataactgacgtatttc -caaatgaattctacgtgtttttcctgtcacctttagccagtgttaaagatgactatggag -tttcgaataggttattctatagacattataacgagtggaacacccataccttcacagtgc -taaaggtaggaacgggtacgtcaggtagttcaagggattttaggttcttaatccaacgaa -gaaataacgcatcacccgtcattctattgttttcgtcgggattacttagtaggcagggta -ttctaacctacctgagttacaaatctttaaaaaactggccatgaggtcatggtgataaaa -tctgaatcgcctaaattcgcgtccctaaggaaatatactagaatccgtctcagaaagtgc -aaaggttgacttcttcccctaacacagaattctcagttttatagctatctagtggcattc -ctttttataaaactttacgtttgtaagggtccaactttacaaaagctcggatgtgtatgt -gtaatcttccgccgtgtaagacttggaacccatgtatattgacggcatggcgtggctaag -caggtattgatcttcagtgtaaagcaaggtatgttctaatctaacaatgtaaagccgggg -attagccgccaaaggggtctaatgacatagagatgctctgaaatcgtaccaactataaaa -gcacgggatttgaaatatagcgacagatcttccgtattctgttagttgacatctgtgctg -tctttaccgattgtgatttggctttagcagtcatttagtttcgttactcattgctcgtgc -gatagttccaccgaatatggcacattcgttctttttttccattttactgcaaaccttttc -aaaagctgatcgataccactgatgatggcattgattagtcgattggcaactatgtcctgc -ttatatctccaattgcattgaatatagtaaaaaataaaggctcgccttcccaatgggcta -cggagtacacgaaaaaatcgcaactcgtttaaccaagcgccgtacctaacatataagtga -ttgagacaaatagttctccagacgtattgagatatatgtctcctataggcaagcgtttct -aattgctgaccagaaattagaattaggttgttaatactatattcgaccattttattccac -gaatgtgctattctactggtattgctccgtatgcgatatataaccaacacggaaagtcgt 
-cgattgcaaagtggctccgtagaatcatttctggtcatttaccgggagcgcgcttgaaca -atggatgcggtatctgccatattgttattgttaaaaagacttccgcttactatcgcttcg -atcatcggaaaaatattaatgaggattgggtcgtataagaaaatcatcttttcagttcgc -agatttttgccaatttaaccggttatttcgtcagacttggtagtgtagttacaagcatca -cgattatatcagctacagaattaaactgtcctgactcgacggggcagtgtgtgagtattg -cgctatatattcaaggtaacaggaggcatataggtcatagtacaaggataatgaggtttg -ctaactttaaaaattattgatttaacggttgattgaaaatctctgcaagatgacgctaga -acacctgatgttcaagtttgccgataataacatataagatgaattactgtctttagaccc -tcatgttaatccgctaacttagggcggaaacaatgttaggctatgcggagtaagtactat -attatgataccacatagaatttaacattcatatgatgtctaatacccgttcccaaccttg -caaccgtcccgattaattaagcaattacggtcatcaatgggccaatcctgtctcaaaaat -tatcatattcaaggttcagctattttggcaatgggtgagtaccgttcttagtgatttacg -aacccataatctaggcgacttaatatacaagatttagagttacgttttccgggtagtaca -tattaacgaccatggatcgggtgaggtgttgtattagttatctgatcttgtcagtagctc -ccaatgtcccagaatattatgtttctactagagtgttcgtatactggaatttaaatatta -tgtaagactagacaaattttatggatacattaggccatcgtagaatatgatatagttgta -acgtccctctatagattttcggagggcaggtatattgcttaataaagatgttcggaaatc -agcggaaaggatttgtaattaactgatgcgcagcgcttaaataagtttagactattaagc -tatatgttcgacagcatgtagttttttttaccagaaagtgttatactgatgacccatgga -ggtagctcctcatgataaaaatattgttacttaagcattactattatagtgttcaaacta -gtaccgttgcatactttaagaatcagacatggcgtttcttatgcagacacacttttttag -ctgttgacgcccacctcacatccatagtaggtcaatcgcataagaacaatattctggact -gttttattacccagaagaaagttttttctttccggttcgttaagacaataaagatcattt -cattcgttctcttaacgatgaactaaagtacttaaagtatccgcctcttgtttcgactag -cgcatagtgtaataattaaggcaagataagaagaacaggaacgcgcacgtcggagataac -tctaatagtctctttattccgtttaatatagcccgtaattgcaccatgcgctacagtaac -ggccgccttcgcaaacctatttatgtaattccaagtttaggtatgcaatggttggggcaa -tgtgaggggttttatcaagactttcgttgcttcgcggggggcgcaaagcagactttacag -tagttaaccgaaaaccgcagggagtcgctctaagtgttaccaacccctcactactacgcg -aaggtactcgattattccttgaatgggctgaaacatcgtgattagcgtcttatgattcag -gctgatagaagaaaacttattttctatattccacgtatacaatcacactcgtaactaaat -agttcccagcgttgtaatgtcgctataataaataaaatacaaagaaaattcgtctgggtg -cataagtacagttagtcgtctgtcacataaataatccgcagtcgatctcattacaggtat -tgttgttggtcaaccttcgcaaggtggtccaagtagcattgttgaacagtaaaactaccg -tcacacaaggaatatcataatagatgccatacacggttttacttgatatgtttacagtcc -ttgagttgcaatcgtagtattgtttcatccggggtgtgtacgaagtaatttagacaaggt -gtgtagcggtcactaggtaaaatgacttaggatggatgagcatttaggtattctatgata -acactaaccatcatgtttctaaaatcctcaggaaatttgtattattttaccaacctgtat -ttatagaaagtgcttttgacttaaagaagccgaagtgttcaaattaaggagtacctgatt -gaaagaatggggaattgtaatctgtaactcaattacaaataagccgttctaaggattaag -gctttgtgtctaagcaactcacgtgaattcgaaattcatactcgattaacgactttaata -ctcttctgcgtatctacagactcatttaaattacggaatatgttttcgtttttggtttcc -agctcgcacgtacgcgtttacaaataaggacacctggtacaattggctggagtacaatgt -tggtttttatttgctgattatcccgatccctgtgggcgttggcataaccgggttttcttc -aagactactttcgtgttgcttatatacctggtaatatcggtgagtagcttagggcttaat -cacaatactaacaagttctctatggattggacagggcggcatccgttgactgaacgatct -attaatccattccctgcactggataaacaagacccatttaaattgaccatagagatgtta -gcgtcatatttctgttcgtgatagggtacatatattataaacggattatgagcagtggtt -ttctagaaaagcattcatagttaggagtgtatcagatcataccactgaaccatagagcac -aattctctactggctatacttcattcctttttgtccgggtggggacgaaatttaaaggtt -ctaacctagaacgcagagcgaattgatcaaggcgctggccaagtgaacggttctaaatgt -tcttaatgagaattgcgtattttgactattgacagggcatcgtaaaccgctactcgactt -ggtatctgtaatctgtatgtagatagagtacgggcctataattcaaattcagccaccgaa -gattcacaatcttcagacgtttgaaggaaagaggtttactggtatgtggtgtcaagcccc -acccattctctgttatatccgagcattaatgtagtttcactgtactacggtcacgccgta -gagtcggcagggcaaatccaaaacaatttaggctgagaagtggcactataatagtttagc -ctaagtcccttcgctaataactcaacaaagatgacgcaaaagtcggcgaatagattgcgt 
-tcgcgtaagggtatcttgaatactgatagctctcatggtaccaagaactttcataacctc -tttatttaccaaacctgttctactagcgttagtgttttagtctgtagccgacacaaaaac -cgagaatggccggcgtaaccggcgcctgcaagctaacatgggatcaaaactattggctta -acgtttaatcgaatgagactagcactgtattactctttcgtttcggcagcggatcaataa -ggaggtgacggcatcactctcttatagtagatatcacttattctcacaacggaagtagga -tcttccgtcctcattaaatttgcaactggctcaatgtaacactgtaatgttaacaaagta -tgagctgaaggtcagagcagacgatgtagtaaggtccctcgaagctgcttacagtatcct -tgaggctcaacgggctatgcggaaattccagacctcgagttacattatgaaacgtgtcat -tccatctcattaaatagttcgtgccctatcgccttgtaatataaacaaccgtttttgtct -attttcccaaggagaaggagagtagcagcttagtggcttgcctatatggccccctaagta -cgtactcggcacgcttagaagattgggctaccccgcactatatgttcccaaagtaggtaa -cctagatagtgtcgtatgaaattcaggtcatcgatgagagtataaaaatatacaattttg -gcaggggttatacattgcgggcatgaagagtaacattggacatgaacggacattcgaacc -ctgtgagtttaataccctatctccggatcattataaagtaaatatacgtcacttactcta -cgtgcgtttagacagtctttgaaactaaattggttatttttctttcatctagatttgtct -gtatctaactaaattatagttccacataaagctgattcaactgaagacataaatataaac -tttctaacatagtagcgaggaaagagctatgcctagcatcggatcatgcgtccgcgagta -gttcctggtagagttaaaagtttttccagaatctagaccgaacacagggtagtgaacgaa -agtgcgcggtgaacatacataataccgaacgtaaacaattccgttcgtattgttgctgta -tctatatttcctacgtaaggctatttgatctataatatgaaaagtcacgtcgaaataaat -caggaagcgcttcgagtatgtacattcagatctccttagtatcatcaaattatagatttt -acggccacgaattattggtctagatgtcccaaaaataatttgatgtcagtagcgatcgtg -cttcctcggagttgaggttggaagaagagtcattatgctataccaagaactctccatcca -gtacctagaaaggcaggtatgtaccgctcattaattttgcgatcttgacagatctgcatg -caaagtaacttgtaccagatggcttttataatagaaactaagtttcccgaataacggtgt -acgataacagatttttaggtgtacagacgtctgactcaatgaacacacattgggacctgc -cccgggaggagtagtagataattaccttctccagcgcgggtcttttaatatcacaacata -aaaatactaattaatatcacacaccctcatcctcgatggagcctagcatcatacacgttt -gatagacaacgccaattttactgtaatatgatattcgaatctagtatgtggacgctgtac -cacattgtttaaaggagctccctttaccgacatgaacgaagcaagctttgtacaagatac -gaagaactcagtactggtaactataagagacaatttatacataaaagtgttaagaccatt -atataaaaagaggtatgaggtctttgtaactacaataatacattcatcgaacgatggaga -ataacagagttatttctgctgctcgagctctagttctgctaatttctcaatcttgatgcc -actcgtttgagtcttccattcgctcttaacgacgcgtacatccctctctcctactcttac -ctatatcctattactggttaacctacatctccgggaaagacgtaggtaaagtggtccacg -attgtattcacttataacacctagtagtactatgtgttgctgagagtgaggacacactta -ctctacgagttcaagtccatatggacattacactttttcagcatctaggtgtcatgatgt -attaacagccgttaggggctatttgattttatcgattgtcggcgtgtgtattttcaacaa -ctaggtgctacaattcgtgaataggcatgaaaattcaagattgcagttcctatcttgtat -aatctttcctttggacgagttgtaccatttcaactaacctgcaagtggggggtcatccat -atgaagatttgccaaatacctggagaccctgaaaagtttatccagattaataataacaaa -caaacctaagcgaagaacgtcagctttaataaactatcactatcatagaaattcctgtta -attgttcttccaaacgttgaatagactatcacgggtaatagattgaacacggagaacgtt -tatccggcttgtaaaatatcgtcgaatctgctgataactcaattatattcgatggagaat -tcatatctaccgcttagcttttaaaaattaagtcagattattccgccacaatgagaaggc -gcgagtgcactaatcaaatcacttaggattattacggacgtctgcattacaatgctttgg -ggtagggttatacaagcatatgattctttaggtctcttgatcgggcgtttaccaccgtag -cttaatgttggcatatccgtgatcctaatattctgttgtcagcgtgtgtataggaatgca -caacgcaaatctttaagctgacctgttcatgaaagacaggagacacgaggcaccacctca -attctatgcaaaactctaacatagcgtggcactatgagtacgtgtaacgacaaggtctca -tactcgatcctaagataattctcgtctggaaggttttaatctttaactaagagtagaact -tagtttattgacttttacaattaggatacggttcgcgactctaccacagggcatcatacc -tggagctctgctatctcgtgaccaaagtggcagcacacatagggtcgggtcctgcatcta -ctgagcaatccctttaagcattcctagtttgagagccatttagatattgctgtttaaacc -gattaatggtttctattattataaagtgtaacgctcccattcgggacattgaaaattagc -aataagacaatgtatgatattcggcgagtctcaacaacattatggtctaccatgggacaa -ggggttgatatgatgaatccacaaaaaatagtcaaacacccatggttcgttaagtgaggg 
-tatccaggtgttataaggacgatctagaagtattcaggtacacggtgttcagacatgctc -taattgtcaggttgtttataatttaacgtatcgctctctattctaaataatataaaatta -accgctcgtagggatgctttccagtaaaagatacactatcattaaggttatgcaaatgtg -gcgatttgatttgaatcttagtacattcttaaacttaaatacgtattatttaaagtaaat -atattatctaaaccgcttttgtctatccacatttcgtcgaatcacgacctcgttaatgcg -acaatttacgaccctctttcatctaaagcgatcatctatttcttctgattgatgtaatac -tgacccttactccgtacatacaaatgatggtaagcaagaatgactgacgctcctgtcacc -tttcgtggcaatcaactggcgctggtactgaagtagcttgaaagggatatggatgtgtat -gccaggcttcattttgacaatttttctgtcctgctcagtgttgtctgaagtcgtatcgta -cacaataatgatgactctcattgtagatccaatcacgctttcctacgctaatgaaagttc -tagatagtgtaggtgttagacagaggttagcgcctacatccttacacacacagtgttgaa -cggcaagcataatcgagtatcaatagctgtatgtatttgtttggaatatcatatttctcc -cgcctttgaacaatgatgccaaaatgtcctgccctagagttatgataaaataactgctgc -cctgtaacttaagtttacaaaccgatattcaatcgttgtgtcctatgaaaatatttatat -ttgcaccaagaaaatcatctgtgcgatgaacaaaacacagtgatttataaatacaaagag -tacatttagttaccggattgcggcttgacatttattttacagaattttatcggcaaaaca -cttcatatgaactatcgcttcacgataagtctatgatagactagcattcgtagagaacag -gaagagcaatcattatatatgaagtgttacagtgggtactacatatgagatcattaggtc -tatatccggccttcctcataagaccttggaaatatcttacatcagagatatcaaaggaag -tatgggcgaacccagaaaaagccccaaagaatagtaattcatcggacgtaatagtctggt -tttaactaggggttattgatatttaagctaaaagagttccctgaacactcgaaatgtata -atctatcccaactaaaaaagtatacctctaattcagaaatgtcattgagattagactgat -gtcaatacgctaggaggtaagacaagtagaagtttttgatttaggaattgaaatgtaata -cctccatcttaagttctatattttaaagttttatgcggacttcgagtaagtgcacaaatg -atggcataagtgcccagttacatgtttgcggccccgtatgagtaatgatctgtttatcaa -tctctagctactatcccacgaatgcactgatgccagtcatggcgcttacattagtcgaca -gaaatccgacgatacctatcacgcgtgaactgttctggttcttattcaattcgaagtgat -ctcagatacattacggccatgcttgcccttcatgtctgctgagcagttttgttataggct -gaatctcctctaagcgaaattgataggatttttggtggtcgatttagtctgtacctgctt -attaagattcaaaatgacctacttcttacgccgaaatgatagggatcggctgaggaggat -aaatatacgctggtgcctggtatttatccagaacaagttgcctgtgtatcagatgaactc -taatctccgagataaaaaacaggtacgtaaaataaaggccgcaaagggttacatctcagg -atcgtggcgtatagtccaccattagttctgacttacttaatatagactgaccgagattgt -agtatgtggatccaagcttgccatgtaaaacatgtcggttagcaaaacgtataggagcat -gatcaaagaagagttaattaatagtactgcactataattgtcggcggagtaccatgagct -gttgcccaattcgatgtttattaacagcacgcataaaaaatccagacttttcaattagaa -ttaactataaatggtccgcgaaccttaaatgatcggaaggacgggatctgccgttgtata -gaccccaactctatctaatttttataacacctctgtaatcaacaaatcttattatgccat -cattatgtcattcgccaagtaagtccagttcgagattctctggaccgtgcaatagtattg -tcaaattatggtaatggaatccttcttctaacacccttagaaaagccacgagaattgaca -agttgggcgtgcttgtccaggagcaacataagtgccgtttctttttacgatgatagggat -tcttaaagcttttctctattctagatcccagttgccatcatcaatatctcaattgatgct -cattatatagttcttatttagtatgtccagatgtcactgaagatcctgcctagaaccgat -attctcgacaggatcatcagttcgacggggcaaacgcacctatgcacatccatcttgacc -gtgaaacgaaaggaaagagtcagtaccgacccaatgtggaaaaaaactcctgtccacgat -atgtaggcaagttttactgcctttaattagtagtcgattagtgtagtttgatattatcta -ccttatagaatgtaaacagtaacccggccttaatggtttggcaggattctttgtaaaagt -taataatgttcataaactttatcagaaaacctgaagtagtccgcctttcgcctgcgtaac -gttgcagattaattcgttttacggagtggcttgcgtcttgttgtccgagtacacatattg -ctcctctcccccactcttctaggaaaatcaattatgctaacctgcagaccttcttcttta -ctatctttaatgcatgcccagtatgttcatagggtagacttgctatctattttgtataat -ctacgaatgatgcttggggcgcgacttttaacaattaagccgttgggtataatttgagag -ggtgccacgatagtaagagatttccggcgtgagtaaggaaaatgataataggattaagca -ggcgtaatagctcaccctcctcagttctccaaccctgaaccggctaagtatgactgtgca -gtattaattttgaatacatattgcagcccctaggatacattatagatgtctctttcttac -ccaaactcgcccgcaccaagaaagaatgtggattcgattgaggttaaattagccggaatt -acagacacagattcttgtttacaattgtgggaagaaaaccacctcaaacgttgaaaccta 
-cattcacaaatggattacgttggggatgagaatcgattccggtcaaaaatcatgcccgga -gcaataaccaagaattcacagaggattaatacacttctccatgaagataggactgcttgc -actatccttatctttgtgtcttccttcaagcaccaatcgtttggggacaaccacaattat -gccaagaaataacggaaggtgttccaaatctatgagtccgcggtttcatcgcaacgtttc -actgtgggtatcatgactttggactttagatttgggtattctagagactgtagaaagact -gcaacaacaagacattcacagggcgaaacctaggaaaggggaccgcacgttgtgctaggg -atgtttccttaggaatccatacatgtaagaaagaatcaaccgtaattatagtgttttcgg -ccccttgaattacgtgcatgcctttgctaaaagacctctgggaaatagattgaatattct -ggacagcagcgaatcctgattatatctcaagcgaatatatgacccgcaagaaggatttat -actagaataagtctaagaaagggcattgggtcacttcttccactaacacacttttatcag -ttttataccttgagagtcccatgcatttttatatatatttaactttcgttgcgtaaaact -ttaaatatgatccgtgctctatctctaatctgaacaacggtatcacgtcgaacaaatcta -gtggctacgaatcgcgtcgctaagaacggtttcttctgctggcgttagctacgtatcttc -tatgctaaaaatgtatagccccgcattagcagcaaaaccgggagaatcaaatacacatcc -gatgaaatcgtaacaaagataaaacaacgcgatttctatgtttgccaaagtgattaagtt -gtatcgtaggggtcagcgctgatgtcttttcagtttgggttttggatttaccagtctttt -agtttcggtactatttgatcgggacattcgtccaaacatgatggctcattcgttcttttt -ttcaattttaatcaaaaccttgtatttacctgatacattaaactgagcatcgcatggagg -tggagattcccatatatgtaatcatttgatatcctattccattctttttagttataaata -aacgctccactgcacaatgggagtaggacttcaccaataattagcatctactgtaaacaa -gcgccgtaacgaaatgattactgattgagaaaaataggtctcaacaacttttgacagata -tgtatccgatacccaagcgttgctaattgcgcaaaagtaagtagaattacggtcgtatta -cttgttgccaaatggttattactccaatgggctattctaatccgatggatacgtaggaga -gagtgtacctacaccgaaactcgtagtgggcttagtggctacgtagaagctgttcgggtc -agttacagcgtgcgaccttgtaaaatcgatcacggtgatgaattattgttattgtttaaa -agaagtcccctgaatagcccttagataatacgaaaatttgttatgtccagtcgctcgtat -atcaaaagattcggttaagttcgcagagttttgccaagtttacaggtgatttactaacac -ttgggagggtacgtacaaccatcacctggttagcagagaatgaattatacggtcatgtcg -cgaagggcaagtgtgtgagtattgaccgagttattaaacgtaaatgcaggcatttacgtc -ataggacatcgagtttgtcctttgcgaaatgttaaatttatggttttttccgttgagtga -taatagctgcaacatgaagatagtaaaactgaggttaaactttcaccatattaaattata -tgttcaattacgcgatgtacaaactaatgttaatcagatttaggagcgcgcttaatatgg -gtccctatcccgactttgtacgagattttgataaaaaatagtattgtaaattcatttgat -ggcgtagaaccgggcaaaaccttgaaaaaggacacatttaggatgctatttccctaagaa -agcggaaaatcctggctcaatatttataatagtaatggttaagattgtggcccaatcgct -gagtacccgtcttacgctttttccaacacataatcgacgagaatgtatttaaatgtttga -gacttacgttttccgcgtacttattattaaagtcattggagagggtgtcgtctgggtgta -gttttctcatctgctcaggagctaaaaatgtaaatctattggttgtttctaattctgtcg -tccgtgtaggctatttaatttttatggtacacttgaatatgtttagccataatgtagcca -atactacaatatcagatacttgtatacgacctatagacttttgccgaccgctcgtagagt -gatttagaaagatgttcggatagcacagcaatcgtttgcgaatgtaagcatgcgaagcga -gtatttaactgttgactattttgctatatgttactctgaatgttgttttttttaccagaa -tgtgttataatgatcaaccatgcacgttcctactaatcatataaattttgttacgtaagc -ttttctatgatagtggtctaaagactacccttgcatactttaagattaagacatgcactt -taggaggaactcacacgttttgagctgttctagcccacctataagccattcgtccgcaat -cccataactacaatagtcggcaatcttttattacccagaactaacgtttttatttcccgg -tacgtatcacattaatcttaatttaatgcgtgagagtaacgatgaacgaaagttatttat -gtttaagccgcttcttgagaatacagattactgttagaatgaaggcatcataactagaac -accaacgcgcacctcgcacattactctaatagtagctttattcagtttaatatagacagt -atttgaaccaggcgctaatgttaaggcccccttcgaaaaccttgttatgttattccatgt -ggtcggaggatttgcggggcgatagcgctgggcggggatcaacaatttcgttcatgcgag -cgcccccataaccagtaggtacagttcggaaaagaaaaccccacgcactcgctagaagtg -ttacaatcacatcacttcgtaccgaagggactactgtattccgtcttggggatgtaacag -actgattacagtcttatgatgaagcctcattcatctaaaattagttgatttattccacgg -atactatcacactcctatagaaagagttaccaccgtgggaagctagatataataaataaa -agacatacaatattagtatggctcatgatctacacttactcggatctctctttttttata -accagtagatcgcattacacgtattgttgttccgcatcaggccctaggggctcaaacttc 
-catggtggataactaaaacgtccgtcactaaacgaagatattaatagatgaaatacacgg -gtttacttgatttctgttcagtcattcacgggaaatcctaggagtctttcataacggcgg -tcttagtaggaatgtagtcaagctctgtagaggtctcgacggaattggtatttcctggca -tcacaatttacctagtattggagatcacttaaaataatgttgagataataatcaggatat -ttctagtatgtgacaaacctctatttagtgattgtgattttcaattaaacaagacgtagg -ggtcaaattaacgactacatgttggaaagaaggccgaattgtaatatctaactcatgtac -taagaagaagtgctttcgtttaaggctttctgtctaacattctaacgtcaattcctatgt -aatactactgtaaccaagttattactcggctgcgtagataaagtctcatgtaaatgacgg -tttatctgttacttttgggtttcaacctagctaggacgccggtactaattacgacacctg -cgtatagtgcagggtgttcaatgtgcctttttatgtccggattataaccatccctctccc -acttggaatatcaccgggttcttaatgacttagttcgtcttccttattttccgggtaaga -tcgctgtggaccggacccattttgatctagtctaaaaaggtatatagcgtttcgtctggc -ccgcttacgttcactgaaacttagattaatcaatgcactgcactggattaacaagaacat -gttatagtgtactgacacatgttagactaagaggtctgttcgggttagccgacttatatg -tttaaccgattttgacaactgggttgagagataacaatgaagagtgaggactgtagaaga -tcttaaaactgtaccatagtgctcaattcgctaatggcttgaattatttaattgttctaa -ccctggcgtcgaatttttttggttcgaaaatacttagcacagcgtattgttcaacgagat -gcacaactgtaccgttagaaagcggcttaatgacaaggcagtattgtgactattgacagg -gaatcctaaaaagctactcgaattggtatatggaagaggtatgtactgagaggtcgcgcc -tattagtcaaattctgccaaagaagagtcaaaagcttaactagtttgatggtatgaggtt -taatgctaggtggtctataccaccaaaaagtatatgggatatcccagaatttatcgactt -tcaatcgtctaccgtcacgacgtacactaggcagccctaatccaaaacttttgaggatga -gtactgccactattatactgtaccatttgtaacttacattttatatcttcaaagaggtag -atattgtcggccattactgtcacttacactaagggtagcttgattactgatacctctcat -ggtaaaaagtaatttaagaacctatttttttacataacctctgctactaccgttagtgtt -ttagtcggttcaagtcacaaaatccctgtagcgcacccctataagcagaaggaaacctta -atgcggataaaaacttttgccggaaccgttaatcctatgagaataccactcttggaatcg -gtcctttaggctgaggatatagaacgaggggaacgcatcaatctaggttaggtgagagaa -ctttgtatcaaaacgcaagtaccatatgccgtcctcagtaaattgccaaatgcagaaatc -ttacactcttttcttaactaagtatgagagcaacctcactcctgaacagcttgttaccta -acgagaagaggctttaagtagcctggagcctcaaccggatatccggatttgactctcatc -cacttacatgatgattacggtcattacatctcatgattttctgagtgccctatagactgg -gaatttaatctaccctgtttctatttgttaacaaggagaaccactggtcaagatgacgcg -cttccatttatgccaccataagtaagttctcggaacccttacatgattggcctaccaacc -tatatatgtgaccaatgtacggtacatagagtgtggcctatcatattcaggtcatcgagc -tcagtatttaaagattatatggtcgctgggggtattcagtgcgcgatggaagactaacat -tggaaatcaacggaattgacaacacgctcactttaataacctatctcaggataagtttaa -tgtaattagacggaactttctctaactccgtgtactaactctttgaaaataatgtgggta -tttttatttcatctagatttgtctgtatcgaaagaaagtattggtccaaataatcctcag -taaaatcaagtcataaatataaaatttagatcttaggacagaggaaagtgctttcccgag -cataggatctggcctacgccagtagttcatgcttgtgttaaaagttgttactgtttatag -tccgtactcagggtagtgttcgatactcagcggggaactgacatattacactaaggaatc -aaggcccttcgtatgggtcatgtttatatatttaattacttacgctatttgatcgagaat -agctatagtaacgtcgtaagaatgcaggatgcgattcgagtttgtaaattcacagatact -gtgtatcatattattatagatgttaaggcatagaattattggtattgatgtacaaaaaat -tatgggtgggcagtaccgataggcattacgagcagtgcagcttggaagaactggatgtat -cctataactagtaagagccttaaaggtactacatacccagggatgttaccatcattaatt -tggccatcttcaatcttcgcaatgcatactttcttctacaagatgccttttagaagacaa -aataagtgtcaacaataacgctgtaacttaactctgttgtacgtggaatcaagtctcact -aaagcaactaacattccgacatgcaaacgcaggactactagattattaaattcgccagcc -cgcctcgtttaatataacatcataaaaattctaagtaatatctcacacactaatccgcca -tcgtccatagcatcagtcacctgtcttacacaaacacatgtttaatcgatgttgttatgc -caagctagtttcgcgaccatgtaactaattgtggaaagctgctaccttgaacgacatcaa -ccatcctacctttgtacaacagaccaacatctctgtactggtaaatagatctgaaaagtt -ataaatataactgttttcacattgatagaaaaacagctatgtgctatttgtatatactat -aataaattaagcgaaacatggagattaaaacagtgttttctcatcctccacctcttgttc -tgctaatttataattcttgatgccactcgtgtgagtcgtccattcgatcgtaaagaaccc 
-gacataaatagatacgacgctgaacgagatcctatttctcctgaaaattattagcacggt -aactcctagggatagtggtactagttggtatgaacgtataaaaacttgtactactttctc -gggatgtgagggagcaaactattactcgaccagtgcaacgcattatcgacagtaaaagtt -ttcagctgatacctgtctggatggattatatgcaggtaggcgagagtggattgtagcgat -gctcggcgggggtattttaaaaatctaggtgataaaagtcctgtttagccaggaaaagtc -atcattgcactgcatatcgtcgattagctgtcatttcgtccactggtaccagttcaacgt -acatcaaagtccgggcgcatccatatcaagttttgcaatagtactccagaccatgaaatg -gttatccagattaataataacttaatatactttcactacatactcagcgggtattaaatt -tcactttatgtcaaaggactcttatgtggtcttcaaaaaggtctagagtctatcacgcct -aattgtgtgaaaaccgagtaacttgatcagccttgtaaaatatagtagaatatgatgtta -aatcatttatattccagggagattgaatagcttacgattagctggtataatttaactcac -atgattaagcaaatatctgtaggaccgagggaaagaataaaataaagtaccatgagttcg -gaacgctgcattacatggcgttgggctagcctgatacaagaagatgagtatggagctctc -ttcatcgggacgtgacaaccctagcgtaatcttggcagatcccggagcagatgattatcg -tctaacactgtctttaccaatgcacaacgcatagatttaacctgaactgttctggattca -ctcctgactacagcctacaactcatttctatgcataactcttaaagacagtcgcaatatc -agtacctctatacacatcggatcagactagatcataagataagtctcctctggatccttg -tattctgttaagtacactacaaatttgtttagtgtctgggacaattacgataagggtcgc -gactagaccacagggcatatgacctccaccgctcctagcgagtctccaatctgcaagcac -tcatacgctaggggcatgaatcgactgtcaatgcactgtaagatttacgagggtgagacc -catttagatatgcctcgtttaaccgttttaggcttgataggatgagtttgtcgatccatc -aaattcccgacattcatattgtccaataagtatatctagcttattcggactcgctaaact -aaattatggtataaatgccgtcaaccggtgcatttgttcaatcaacaaattatagtcaat -ctcccatggggccttatggcagcgtatacagctggtataacgaccatatacaactatgaa -cggactagctgtgaactaagcagattattggatccttgtgtataattttaagtttcgatc -tatatgctatagtatagaaaatgttccgatcgtacgcttcctttacagttaaacagtcta -tatcatgaagcttatccaaagctggacatttgatggcaatcttacttaattatgaaactt -aattacctattattgaaagtatttatatgatcgaataagatttgctctataaacaggtcg -tccattcacgacctagtgattgcgtaaattgaccaacctaggtaatctaaagcctgcatc -tatttcttatcattcatgttatactgacccgttctcagtacttaaaaatgatcgtaagca -agaatcactcacgctcatgtcacatttagtcgaaataaactgccgatgggaaggaagttc -cgtcattgcgatatcgatgtctatcccacgcgtcattttcaaattggttatctacggata -actgtgcgatgaactactataggtcaaaattatcttcaatctcattctagatcatataaa -gatgtccttcgcgattgatacgtctacagtgtgttggtgttacacagagggtagcgacta -cttacttactaactctctcttgatccgcaagcataagccaggttaaagtgctctatcttt -ttctgtggattataatagttataccgccttgcatctaggtgcccattaggtaatgcccta -gtgttttcataaatttactcctgccatctaacgttactttaatttcccagattcaatagg -tctctcatttgaaaattgttatatgtcaacaaagaatataatagctgagtggaacaatac -actgtgagggagtaatacatactctaaattttctttacggtttgcgcctgcacagttttt -tttatctatgtgatccgcataaaaagtaatttcaacgttccattcaagttaagtcttggt -gacactagcattaggagagatcaccaagaccattatttatttagctagggtttaagtcgg -ttagaaatatcagataatgaggtctttatccggccttacgcagtagaaattggaaatttc -gtaaagcactgagttcaatggaagtatggccgaacccacataatgcacaaatcaagtcga -tttcttccgtccttttagtctcctgggaactacgggttattcatagttaagctaaatcag -ttaacggaactagacaaatgtataatagttcccaaatatatatctataaatcttatgcag -ttagggaatgcagatttgaatcatggcaatacgctagctcggaactcaactacaagtgtt -ggatgtacgaattcaaaggtattacatccttatgatgttcttttttggatacttttatga -cgacttccacgaagtgaaattatgttcgaatatctgaacagttacttggttgagcccaag -gatgacgaatgttctgtttataattctcgtcataatataaatacaagcatatgaggccag -tcatggagctttcatttggactaacatttccgtagagtcatatcacgcctgtaatctgat -ccgtctttttctattcgaagtgttatcagatacatgacgcccttgcgtgacattcatggc -tcctgacatcgggtcttttaggctgaatctaatctaacccaatttgtttggattgtgggt -cctccattttgtctgttaatgcttattaagattaaaaatgtactacgtatttagacctaa -tgattgcgatacgctgtggaccattaatataagctgcgccaggggatttttccagatcat -ctggcctgtgtatatgttcaaatctaatagccgagagaaattactccgacggaaaataaa -ggcagataagcgtttcagagcaccatcgtggcgtttagtcaacctttagttcggaattta -ttaatatacaatctcactctttggacgagctccttaaaagatgcccttgtatatcatgtc 
-ccgtacctaaaagtataccagcatcatcaaagaacagttaaggaatacgactgctctata -attgtccgaggagtaccttctcatctgccaatagtcgttgggttggaaaacaacgcatta -atatgccacacttgtcaattagaagtttctataaaggggacgagtaactgatttgagacc -tagcacggcagaggacgttcgtgtgacaacatctctttataagtttgagataaaatcgct -aatctacaatgattatttgccaatcattatcgaatgcgcaaagtatctcctgttcgtgat -tctagcctaaggccattactatggtcaaattatgctaatcgaagcagtcttctaacaccc -ttagaaaagcaaacactattgaatactgccgccgcattcgccagcaccaacataactgca -cgtgcttttttccatgattggcattatgaaagatttgatctatgattcttaccagttgca -atattcaatttagcatgtgttcctaattattgtgttattatggtctatctcatcatgtaa -atgaagatcatgacgtcaacacagattctagtcaggatcatcagttcctcggggaaatcg -cacctaggaacagccttatgcaaccgctaaacaaagcaatgaggatgtaccgacaaaagc -tcgatttaaaagcctcgaaacgagatgtacgaatcgtttactgccttttatgaggagtcg -agtactgttggttcatatttgctacatgattgtatgtaataacgatcccgccctttatcg -gttcgatcctttatggcgataagttatgaatcgtcagtatctttagatcaaaaactcaac -tagtacccagttccccggaggaacggtcatgattaatgcgttttacggtctcccgtccct -cttcttgtcagaggaatcagtttcatccgatcccactcgatgattggtatagctatttgc -cgaaaagccacaacgtattcggtactatcttgtttgattcccctgtatcttaattcgcga -cacttgatatcttttgtgtttaatcgacgaatcatcctgggggcgacacttgttacaatt -atccagttgcgtttaatggctgtgggtcacaagattgttagacaggtcccgcgtgtcgta -ggaaattgataattggagtttgcaggacgaatagctcacccgcctaagtgatccaaccct -catcaggataactatcactgggcagtattatttttgatttcatatgccaccccctaggag -actgtagtcatgtatctttcttacccaatctagcccgaaacaagaaagaatgtcgattcc -agtcaccttttattagaccgatttacacacaaagtgtcttggtttaaaggctggcatgaa -tacatactcaaaagttgaaaacgacttgctctattcgattaccttcgcgatctcaatcga -ttacgctaaattttaatgcccgctgaaatatccaacatttaaaacaggattaattctctg -atccatgaacttaggactcattgcacgtgacttatctttctctcttaattcatgctccaa -tacggtgggctaaaccacttttatcacatgaatgtacgcaacgtgttaataagctatgag -tacgcgggggcagcgaaacgggtcaatctgggtatcttctattgggacggtacatttcgg -ttttatagactatgtagttacacggcatcaacatgtaattaaaacggcgtaacctaggaa -agccgaacgcaccttgggattgccatgtgtccggaggattacatacatctaagaaacatt -ctaaactatgtatagtcgtttacgacccttgtagtacgtgcatcccttggcgaaaagtac -tctgggtattagagtgtatattatcgacagcaccgaatcctcattttatagcttgacaat -ttatgacccgaaagaaccttttataagtctataagtatatctaacgcaattgcggcactg -agtccactaactatctttgagcagtgttatacagtgagacgccatggaaggggtttatat -attttactgtcgttccctaaaaagttaattatcagacctgcgcgatctcgtagatgaaca -acgcgatctagtcgaaaaatgcttgtggctaccattccagtcgagatcaaccgtttctgc -ggatcgcgttacattccttgcttatttgcgataaatcgatacaaccccattaccagaaaa -acccggagaatcaattactctgcagatcttatactaaaaaagagattacaacccctgttc -tatgtgtcccaaagtgagtaacgtggagcgttggggtaagagcggagcgattttaacttt -cgcttttccattttccagtattgtactttacgttatatttgagcggcacattcgtcaaaa -catgatccatatggactgaggtgtttaaatgttaatcaaataattgtattttcagctgac -tttaaaatctgcagccattggaggtggagattccaatagatgtaagcaggtgatatcata -tgcaattcttgtgacttattaagataccagacacggcacaatcgcagtagcacgtaaaca -ataatgacaatcgacggttaaattccgaacgtaagatatgtttacggatgcactaaaata -ggtagcaacaacgtttctctgagatgtataagttaccaaacactggagaattccgctaaa -ctaaggacaatttccgtcgtattaattgttgacaaatggttagtaatacattcgcagtgg -ataatccgttgcatacctagcactgagtgtaaataaaaccaatcgactactggcatttcg -ggctaacgactagatgttagcctatgtgaaagcctcacacatgcttattgccttcacggt -gagcaatgtttcttattcgttattagaagtcacctgtagagacagtagagatgacctaaa -tttggtttgtccagtcccgaggtgatctaatgattaggttaacttagaacagtggtcaat -tggttaaagctgatttacgaacacttccgaggggtcgtaaaacattaaactggtgagaac -agtatgatgtattcggtcatctagacaaccccatcgctgggagtttggacagtgttatga -ttcgtaaatccaccatgtgtccgaattcgaaatcctgttgctccggggagatagggttaa -tttaggcttttttacggtgtggcatattagctcaaacatcaacattcttaaaatcagcgt -aaacggtcaccagttgatatttgttctgctaggaagcgatgtacaaaataagcttaataa -gatttaggtccgaccttaatttcggtccatagcacctctttctaagtgttttgcttaaat -aattgtattgttattgattttctgcgagttgaacacggaaaataagtcaaaaaggacact 
-tttaggttcatatgtaccgatgaatgcgcaatagaatcgagaaatttttagattagtaat -cgtgatgattgtggccaaatcccgcactaaacggctttcgctgtttccaaaaaattttag -tccactaggtatttaaatgttggacactgaacgtggaagccgtcgtattatgaaactaat -ggcagaggggctcctctgcgtgtactttgagcagatgctatcgtcagaaaaaggtaaatc -ttttggttctttataattctggcgtccgtgtagcctagtgaatgtgtttggttcaagtga -atttgtttagccagaatggaccaattacgtcattagctgttacgtctatacgaaatatag -actgtggacgacccatcgtagagtcatgtagttacatgtgaccttagaacaccaatcgtg -tgcgattgtaagcaggacaacacagtattgtactggtcaattggttcatagatctgacta -tgaatcttcgtttttgtacaacaatctcggtgaagcttcaaaaagcctccttcctaataa -tcagttaatttttcgtaaggttcctgttcgaggttagtcgtataaagacgaaacggcctt -aatgtaacattaactattccactgtaggtggatctaacaaggttggacatgtgctaccaa -taagataagaatttcgtccgcaatacaatatctacttttgtagcctatcttggattaaca -acaacttacgttggtatttcaccggacgtatcaaatgattctgattttaatgactgagag -taaacatcaacgaatcttatgtatctttaagccgctgcttgacaagtcacattactgtta -gaatgaacgcttcattactacaaaacctaccaccaactcccacattaatattatactaga -tgtttgaagtttatttgacaaaggttttcaaaaagcacagaatcgttacgaacacgtaca -ttaaattgttagggtattaattgtggtcggtgcatttccggccccatagcgctccgcggg -gagaaactatggccttcatgacagcccccccataacatctaggtaatggtcggataacta -taaacaaccctctccagagaactgtgaaaataaaatctcttagtacacaagcgtatactg -gtttaagtcttgcccatcttaaagactcttttcactattttcttgatgcctcattcttct -aatattaggtgattttttaatccgagaatataaaaagacgatagaaagtgttaaaacacg -gcgtagcgacatattttaaagaaatgaaatactttttgactatccctcatgatctaaact -tacgcggagctatctttttgtataacatgtacagagaattaatccgatgcttcttccgat -taaggacatagcgccgaaaacgtcatggcggcttatcgatatcgtaacgcactataccaa -gtgattaagtgatcaatgaatacgggtttcgggatttctgttaagtcatgcacggcaaat -acttggagtcttgaataacgccgcgcgtagtacgaaggttctcaagctcgcgtgacgtat -agaccgtattgctatttcctgccttctcaattgtccgaggattgctgataacttaaaata -aggttgagtttttaataacgatttgtcgagtttgggaaaatcctcgtttgtgtgtttgtc -attttcaagttatcaagaactacgggtataatttacgacgtaatgttggtttgatgcccg -attgcgaatatcgtacgaatggtatttgtacaactgctttcctttatcgattgctcgaga -acattataaagtctattactatggattaagactgtatacaagtgtttaagcggagcccgt -gataatctataaggttttggtacctttatctgttacttttgccttgaaacatacatacgt -acacgggaatatttacctaaacgccgtatagtccagcctcgtatttgggccgtgttttgt -cagcattttaaactgaaagcgcccacttgcattataacccggtgcggaatctcttagtga -ctcgtcaggagtttacgcctttgagacctctcgacaggacccattttgatctagtcgtta -taggtagagtgcctttcctatcgcaccattaccttctagcaaacttagagtattcaatga -aatcatatcctgtttatactaaatgttataggctaatgacacagctgacactaagaggtc -tcttcgggttacccgaatgagttgtttatacgatgttgacaactcgggggagtcatttca -atgaagactgaggactcttgatcagattaaaacgcttaatgactgataatttagattatg -ccgtgtattatttaagtgggcgaaccctcccctagaatgggtttcctgagaaaagtctta -gaacacagtattctgaatccagatgcaaatcgctaacgttagtaagcggctgtagctctt -ggcagtttggtcaatagtcaatcgcaatccgtttaaccgtctactattcctagagcgaag -agctatgttctgacacgtccccaatattaggcaaaggctccaaaagaacagtcaattgat -taactacgggcttggtttctccgtgaatccttgcgccgctataccacataaaaggatagc -ggtgataccacaagtttgcgacgttaaagcgtcgaccctcaacaagtacactagcaaccc -cttagcaattaattttgtccatcactactgccaagagttgactggaccagttggaaatga -catttgatatattaatagagctacatattgtaccactttactgtcacttacactaaccct -agcgtgattactcatacatatattcgtaaattctaagttatgatactagttttgtaaatt -taatcggcgaagacacgttctcttgtacgagcttcaactaaatatttcactgtagccaac -cactttaaccagaaggataccttaatgccgatataatattgtccaggaaacgttaatact -ttcacaagacaaagcttggaagaggtactttacgatcacctgatagatcgaccggaacga -ttctatataggtttggtctgagaaatttgtagctaaaaccatgttccataggaactcctc -tgtaatgggcaaaatgcagatagcgttcaatcgttgcttaactatctatcacagcatcct -aactcctcaacagcttctttcctaaagacatcagcaggtaagttgacggcacccgataac -ccagagcacgattggaatctaatactctgtatggatcattacgctaagtaaatataatga -ttttctgactcaaagttacactgcgaattttatattaactggttctatttgttaaatacc -acaacctctcgtcaacaggtcgcgatgcaagtgatccaaaaatatctaacttataccaac 
-cattacttctggcgcagaaaaacatagatatctgaacaatcgaccgttaagactgtctcg -ccgatcttaggaacctaatactgctcagtagttattgtttatttgggccatccccggatt -atgtcagccatggaacactaaaagtcctaatctaacctatggacaaaaagctcactttta -taaaattgctcaccttatgttgattgttatttgtccgaaatgtctataactcagtgtact -atctattggaaaattatggccggagttttattgaatatacttttgtatgttgagaaagaa -tgttgtcgtaataattatcagctggaaaatcatctaatatatattatattgagatattac -gacagacctaagtgctttcccgtcatgagcagatggactaacactcttggtaatccttct -cgttttagttggtaatgtttagtctaagtaatatcccgactcttacttactcagagcgga -aatgactttttaaactaacgtttaaaggcacttagtatgcgtcagggttatttttttaat -tacgtacccttgtgcagagagtttagctattcgatcctacttagtatgaaccatgagagt -acaggttggtaattcacagagaaggtcgagaagattatttttgatgtttaccaatactat -gaggcgtattcatcgaaataattttatggctgcgcacttcacatacgcaggaagaccact -gcagcttgctagatctggatgtatcattgtacttctaagagcctgaaaggtaatacattc -ccagcgagcgtaacagattgtatggggacatattcaatcttagcaatgcattcgttcttc -gaaatcaggcatttttgatgtcataagttctgtcaactataaccctggaactttaatctg -ttgttcgtcgaatcaaggatcaagaaagcttctaaaaggcccaaagcaaaacccaccact -acttcagttttaaattagaatcacaccctagggtattagataataattaaatgtcttagg -aagagatatcaaaagatgcagacatcctcaagtgaataagtctccggtctttcacaaaca -catggttaagcgatgtggttttgactagagacgttcgccaccatcgtaatatttctggtt -acctgcgaacgtgaaccaaatcttacttcatacattgcttaaacagtacaacttatctct -tatcctatagagatctcaaaagtttgtatttttactggtttcaaattgagagaaaaactg -cgttctccgatttctatattattgtttaaatgatgccaaacatccagtttaaaacacggt -gtgatcagccgactcagattcgtatcctatgttagaatgagtcatcaaactacggtcacg -cgtacattacagagtaaactacacgaatgaaagagataagaagatgaaagagttaatagg -tctcctgttaattatgagaaccctaactactacggattggcctactagtgggttggaacg -gatataaaattcgactaagttcgcggcatgtcaggctcctaaatatgaagagaactcggc -atcgaattatccacagtaatagttggaacatgattcctctatgcatggtgtatatccacg -tacgccagtgtgcagtgtagccatgcgaccacgggcgttgtgaatattcttcctcagaaa -aggactgttgagcaaggaattggattctgtgaacggaatatagtcgagtagatggaattt -cctacactgcgaaaaggtcatagtaaatcaaacgccgcgcgcagacatatcttcttggca -attagtactccactaaatcaattggttataaacttttagaatatctttatataagttcac -tacttacgctgcgggtagtatatttaaagtgatgtcttaggaatcttatggcggcggaat -aaacggcttgactatagataccctaattctggcataaccctgtaacgtgtgaagcatgct -ttaatagacgactagatcagcttatagaatggatatgactgccacattgaagagattaac -attagcgggtataatgttacgaacttgtttaacaaaatagctctaccacacacgcatagt -ataatataaaggtcctggagttcgctacgagcctggaattgcagttcccctaccctgagt -aaacaagatcagtatggacctatcttctgacccacgtgtaaaaactaccgttagcggccc -tgagaacggtgaagttgattatcggctaacactcgctttaccaaggaacaaacaattgat -ggaacaggtaagcggctggattctatcctgaatacagcataataatatttgctttcaata -tatagttatgacactcccaatatcactaactctttacaaatcggatatgaagagtgaatt -agagatggagccgatcgttccttgtattctggtaagtactcgactaatgtgtgtagtcta -ggggtaaaggtccttaaccgtcgagtctagaactcacgcattatgaaatcctccgagcat -agagactctaaattcgccaagcaataagtcccgacgcgaaggatgagaagctcattgaac -tgtaacatttacgtcgggctcaccatgttacatatgcagcgggtaaaagtttttgcctgg -agtggttgagtttcgcgatacataaaaggccccactttcatatggtcaaatatctatatc -gtgctttggacgactcgataaactaaagtagcctagtaatgccctaaaccgctgcatttg -tgcaataaaaaatttagagtatatataacttccggacgtatggctgccttgaatcctcgg -atatcgtccttatacaacgatgaacggtatagctcggaactatgcagattaggcgatcct -tgggttgaatttttagtttccatagatatgagttagttttgatatggttaccatacgtcc -ctgcattgaaacttaatctgtatattgattgatccttagcaatagcggcacatttctggg -caatatgacttaattaggttacggtttttactatgatggatacgttttatatgatagaat -aacagttgctatttaaacaggtactacattcaactaatactgtttcactattgtgtccaa -catagggaatatattgcctgaatagatgtattatcaggcatcttttacgctccaggtaga -actaattaaaaatgatccttagaaactttcaagcaacataagctaaaagttacgccaatt -ataagccacatcggtaggatcttcaggcattcccatatccttctctatcaatcccgtctg -ttgctaattggttatctaagcatatcgcggcgagcatctacgataggtataaagttgctg -ctatctaattcgtcataatatatacatggaattacagattcatacgtcttcagtctcgtg 
-gtgtttctaagagcggacccaagaattacgtaatatctctctcgtgttacccaagaagtt -gacacgtgattgtcagctatctttttctggcgatgttaatagttataaacaattgcatat -agctgcaaattagctaatcaaatactcgtttcttaaatgttatcagcaaagctttaggtt -ctgtaatttcactgtgtaaagagggcgctaagttcaaaattggtttttggcaacaaacaa -tttaatagcgcagtgcaaaaataatatctcagggtgtaattatttctctaattggtcttt -acggttggaccaggcaatgggttttttatctatgtgataccaattaaaagtaatttcaaa -gtgacattaaacttaagtattgctgtcaagaccattacgacacttcaccaacacatttat -gtattgtgctacgcggtatggcccgtagtaatttctgatattgaccgcgttatcagcaag -tacgctgtacaaatgccaaatttagtaaagctctgtgtgcattccaaggtgcccacatca -cacattatcaacatatcatgtcgttgtattacgtccttttactagcctgggaaataccgg -tgattcagagtgaacataaatctctgaaagctactagacaaagctagtatagttaaaata -tatatttcttttaatattaggatctttgcgattgcacatttcaagcatcgcattaaccta -cctccgtactcttctacaacggttgcatgtacgatttctatgcgatgaaatacttatgtt -cttagtttggggttactttgttcacctagtcctcgaacgcaaattagcttcgaatatctg -aaaagtgtatgcgggcaccaaaacgatctcgattcttaggtttataattatagtcagaag -ataaatacatgcatatctggacactcttccacatgtcatgtcgactaactttgaactaca -gtcatatatagactgttatctgatccgtatgtgtctattactactcttatctgagaaagg -acccaatggagtcacagtaagcgatcatgtcatcggggctttttccctgattataagatt -acactattgctgtgcttggggcctcctactttttctatcttaatcattttgtacattaaa -aagctaagaagtaggtacaacttatctttcccatacgagctggaccattaatttaacagc -cgcaaggcgagttttaatgttaatctggaagggctttatgttctaagcttttagcactga -gaaattaatccgtaggaaattaatcccacataacccggtaagagaaccttacgccccgtt -actaataatgttctgcgcaatgtaggaagtgacaagctcactcttgcgacgagctcctta -atacaggccctgcgttatattcgaccgtacctataactagaccaccatcttaaatgtaca -gttatggttttcgacgcatagagtatgggaccacctcgaaatgctcagctgcaaattgta -ctgggggtggttatcaaacatttaatatgaatctatggtaaagtactagtttatagatag -ccgaacactaaaggtttgcagaccttcctcccctgaggaacttcgtgtcacaaattagat -tgagaaggtggtgataaaatcgcgtatctacaatgatttggtgcaaatatttatcgattg -cccaatcgttctactcgtactctttatagcctaacgccttttcttggcgctaattagcct -aatccaagaaggagtctaacaaaattacttaaccatactcttgtctattcggcccacgca -tgcgcaagctcaaaaagttctcaacgggcgtttttacttgagtcccaggaggtaacattg -gatctatgagtcttaacagtggaaatatgatttttagattgtgttcagatttattgtctt -attttggtctatctcatcagctatagctacataatgacgtcttaactgtttcgactaacc -ttcagatctgactaccccaaatacaacatagcaaaagaatgatgctaacgcttaactatc -ctttcacgatcttaacaaaaaagctccatttaaaagaatcgaaaacagatctaccattcg -tggaatcaatttttggacgagtactggtcgggtcgtgcttatttgctacaggattgtttc -gtataacgttcaagcactttagcggttccatccttgatggcgttaactgatgatgcgtaa -gtttatggtgatctaaaactctactacgaaccaggtcccagcacgaaacgtcatctttaa -tgagtttttaggtctccaggcactaggctgcgaagtggaatatgtgtcatcagagacaaa -tagatgattcctatagctttttgcagttaagccactaagtaggcggttctatagggtttc -attcaaatcgatcgtaattcccgactctgcatagcgtgggtcttgtatagaccattcttc -aggcccgccacaatggtttcaagtttcaacttccgtttattggctgtccctcaatagagt -cgttctcagggcacgactctcgttcgttattcataagtccagtttgatccacgaatacag -aacacgcatatctgataataaaagcttaacgataactttcacgcgcatggtttatttttg -atttattaggcaaccaaataccagaatgtagtcagcgatatgtagtaaaatttagacaaa -cataaaacaaagtatcgccattacagtctcctgttaggagaacctttttatcaatatgtg -taggcgtgtattggcgcccttgatttaataataattacggctaaacgtattgatattttc -caggaactgccccatctcatgagatgaccctaaattttattcacacctcatttttaattc -ttttatatcacgattatttatctgagcaagcatctttgcaagcattcatagtgacggtgc -tgtctctatgaatgcatgctaatatacggtgcgctaaacatattggttcaattcaatgta -agctacctcggaatttgcttgcactaagacggggaagccaaaacggtaaatcgccgtata -tgctagtgccaagggacttgtccgttggagtcactatggagttacaagcattataaatct -aaggaaatcgcagtatcagtccttaccccaaagatacttcgcattccctggggtacggac -catgaaatacttctttcatacatgataaacgatggagactcggttaccaccctggtagtt -actccatcaattggagttaactaagatcgctattacaggctttattagccaatcatcaca -agcctctttttagagattcacaagttagcaaaccaaagttcctttgataagtctttaacg -agatctatcccaattccggctaggagtaaaatttatatatttgagatcggggttaaagtc 
-acacgcaatgcaaggggtttttatatggtaatgtccttccctaattaggtaattttcaga -cctccgagagagagtagatcaacaacgcgttatactcctaaaatgcttgtcgataacatg -acactacagatcatccctggatgagcatcgactttcattacttgattagttcagttaatt -cgtttcaaaccattttcaacaaaatcccccagtagatatgtatatgcacatcttagacta -aataacagttttcataccctgggatttgtgtcactatctcaggaacgtcgagacgtcccc -tatcaccgcagcgagggtaactggccctgttccattgtaatcgatgggacgggacgttat -attgcagacccaaagtagtaataaattcagccatatggacggagggggggaattgttaag -aatataattcgattttcagctgaatgtaaaagctccagccattcctcctccacttgacat -tagttcgaagaaggtctgagaattggaattgcttgtgacgttttttgtttccagacaagg -aaatagcccagtaccaagtataatattatgacaatagaagcttaaattcacaacgtaaca -tatctgttagcatgctctaatagaccgagaaaataagtgtctatgtgtgcgagaactgtc -aattcacggcagtagtcacctaatctaacgtctagttcccgactatgaagtcttcacaaa -tggttagtaataatttcccagtggagtagaagtggcataacgtgcactctctgttaataa -tacctttagactactcccatttcgccagaacgtcttgatggtaccctatgggaaacactc -acacatgcttattgcctgcaacctcagcaatgtgtcgtatgcggtatttctacgaacagc -tagtgaaaggactgatgacctaattttggtttctcaagtccagacgtgatattttgatga -ccgtatctgacatctctgggcaattcggttaacctctggtacgaaatagtccgtcgcgta -ggtaaaaatgataatgctgtcatcactatcatgttttagctaagctacactaccccatcg -ctcgcacgtggcaaagtgtgaggattccgatatcatccatgtgtacgaattcctaatact -cttgctcagggcacttagggttattgtagcctgtgttaccgtctcgcatattagatcatt -aatcaacagtcttataatcaccgtaatcggtaaacagttgttatttgttctgataggtag -acagctaataaagatgctgttgaacagttacgtcccacctttattgccctacagtgaaac -tagttcttactctgttgctgtaatatgtctagggttattgatttgctgccacttcaaaac -ggaaattaagtcattaacgaaaatggttccttcataggtaaagatcaatccccaattgaa -gccagaaattttgagatgtcgattcctgatcattcgccaaatttacagctcgtaaacgag -ttccatgtgtaaaaaaatgttgagtccactagcttgtttattctggctcaaggtacgtgg -aacacgtagtattttgatactaatgccagacccgctacgatccctgtactgtgagcagag -ccgatcctcagaaatagctaaatcttgtgcttcgttagaagtctcgactacgtgtagcct -agtgtttgtgttgcgttatagtctatttgtggacacagtatggtcaaatgacgtcttttg -atctgacggcgttaacaaagatactctgggcaacacacatacttctctcatgttgtttct -tcggacctttcataacctttcctggcacatggttagctgcacatcacaggattgtaaggg -tctagtggttcagtgagcggaatatcattcgtcggtggtgttaatctatctcggtgtagc -ttataaatgcatccgtaagaatattatgtttatttgtcggtacgttcatggtagtggtgt -cgccgatttagacgtaaaggcatgtatggatcttgatctatgcaaaggtaggtccatcta -tatacgttgcacagcggatacaaataagataagaatttactaacatttaaattttcttat -tgtcgagcatagattggaggaaaaacttatttacttggtatttaaacggaagtttctaat -gtttatgattggatgcacggacagtttactgcttactttcttaggtttcttgaacaacag -gatgcactagtaacatgtctcgttcatgcttccattaagttcttcttaaacttacacaaa -ctacctaatttagagttgacgagatggttgaacgtgttgtgacaaacgtttgcaaaatgc -acagtatcgttaccaaaaagtacatttaagtgtgtgcgtaggaattctgctacgtccatt -gcaggccacattcacatcccacccctgaatatatggactgaatcacacacaccaaatttc -atctaccttatcgtagcataactattaacaaacatatacagacttcgcggtaaataaaat -atattagtacacaaccgtatactggttgaactattgcccagctttaagacgcttttaact -aggtgcttgatcaagaagtattattatatgacggcagtgtgtaatacctgaatagatata -gacgttagattgtctgaaaacacgccgtagagacatttttgttagatatgtatttctttt -tgacgagccagcatcttagtatctgaagacgagctatatgtttgtagaaaatcgactgac -attgtatacgaggcggcgtaagattaaccaaattccccagaattagtaatggcgccttat -cgatttactaacgatatataacttgtgatgttgtctgcaatgtatacccgtgtaggctgt -gctcttatcgaaggaaacgcattgaagtccaggctggatgaaaccaccgcgtacttccat -gcgtctatacatagcgtcaccgatactacgttttgctatgtaatccattctaatgggtaa -gaggattcctcttatagtaaaatatgcttgactttttaagaaccattgggagtggttggc -aaaataatagtgggtgtctttctcagtgtatagttttctacaactacccctattaggtta -caagtaatctggctttcttgccacttggcgatgatagttagattcgtatttctacaacgc -agttactgtatccatggcgcgagataattagatacgatttgaatttggatgtagactcgt -tactactgttgtagaccagcacgtgagtatctagatgggtttgctaccttgttagcggac -ttttgccgggaaaaagacatacgtacaaccgtatattttactataagcagtattggccac -cctcgtattgcggcagggtgtgctcacctggttaaaatgaaagagaaaaattccatttta 
-aaacccggaggaatctattactgacgaggaaggtgtttaacccgttgagacatctcctaa -cgtaaaaggttcatattctagttattccgagagtcactttcctatccaaacatgaactga -tagcataatgacaggttgaatggaaagcatatcctgtttattctaaatctgtttcgctaa -tcaatatgctgtcacgaactcggagcttacccttacaactatgtgttctgtttaccaggt -gctaatatcccggcactcttttcatgcatgtcgctcctagcgtcatctgatttaatagct -taatgtctcatattttacagtagccagtgtagtatggaaggcggcgaaccagcccctaca -ttgggtttcctgacataagtattacatatcacttgtctgattacacagcaaaatcgctaa -ccttactttgcgcatgtagctattggaactttgggctagtgtctatcccattaagtttaa -cagtagactagtccgtgagcgatcaccgagcttatgtctcgtacccaagttttggatttg -gatcaaaaactactcgatattcatgatctacgggcttcctttctccgggtatcattgcgc -cgagattaaaaataaaacgatagcgctgtgaaaacatgtttgacacgggatagcgtagaa -actaaacaacgaatagaccatccaatttgaattttattgggtccagcacttcgccatagt -gttgaatggtaaagttcgaaaggaaatttgttatattaattctgctacattttcgaccac -ttgtatctcaaggacaatatcccttgaggcttttagcagaaagagatgccgtaattctaa -gggatgataataggttgggaaatttaagagcagtagtaacggtcgcgggttcgaccttaa -actatatatttaaatctagccaaacaagttaacaacaaccataaagttatgaccttatta -tattggcaagcttaacgttttaattgctctagtaatagagtggtagaggtaagggaccat -cacctgattcttcctccgcaaccattatatagacgtgtcgtctgacaaatttcgagataa -aacattcgtccttagcaacgaatatcgaatggcaattagccacattgagttaaatagttg -aggatatttcttgcacagaatcagatctaatctaatgattcgttactaaacacttcacca -ggtatcgtgaaggctcaagattacccagagaacctttgcaatataagaatatgtatgcag -cattaccctaagtaattatattctttttctgactcaaagtgacaagccctagtgtatatt -aaatcggtatatttgggaaattcctcaaactatcctaatcaggtagccatgaaagtgatc -aaaaaagttcgtacttataccatacatgaattctggccaagtaaaaaatagattgcgcaa -aattcgtaccttaagtctctcgccaagatattaggatcctattactcatatcgtgttttt -ctttattgccgccatccccggagtatctcacccatccttctcttaaaggcctaatattac -ctatgcaaataaacatatattgttgaaaattgagaacctgatcgtgattcttatgtgtac -catatgtatagtaatcacgcgactatatagtgctttagtatcgcccgtgggtgagtgaat -attctgggctagcgtgagatagtttcttgtcctaatatttttcagatcgaatagcttcta -tttttgtgtttattgacatatgtcgaaactccttactcagtgaaagtcatgaccagatcc -acgaacaatcttcggaatcagtctcgttttacggcggaatcttgagtctaacttatatcc -cgtcgcttactttctaacaccccttatgtatttttaaaattacgtttattcgaacgtact -tggcggaagcgttattttttgaagtaagttacattgggcagactcttgacattttcgata -cgactttctttcatccatcacaggactcgttcgtattgatatcagaagctcgtgatgatt -agttgtcttctttaccaatactttgaggcctattctgcgaaatttttgttgccctgcgaa -cttcacataccaaggaacacctcgcaacatgccttcatatccatcgttcattgtaattct -tacacaatgaatcctaagtaattacatccctgcgtaaaagatggtaggggcactgaggat -atattaccaagcatttagttatgagtaatcagcaatgtttcttgtattaagttctctaaa -atagttacatcgtaatgttatctcgggttccgcgaataaacgagatagattcattatata -tggccctaagcaaaaacctcctcgtattctgttggtaattagaatcacacaatacgggtt -gagatattaattatttgtagtacgaagagatataaaaagatgaacaattactcaagtcaa -gatgtatacgggatttataataaaaatcgggtagagatctgctttgcaattcagacgtgc -cactaaatcgtaatatgtcgcgttacatcagaaagggtaactattattaattaataaagg -gcttaatcactacatattagatcttatccgatagtcttatctattcgttgtatttttaag -cggttctaattcagtcattatatcagtgctccgagttctttattattgttttaaggatga -caaaatgcctcttgttataacgctgggagaagcagactaagagtcggagcagttggtaga -atgaggctgcaaaagacggtctcgacgaatggacagactttactaaaccaatgaaagaca -gaagtagagcaaagtctgaagtggtatcagcttaattatgacaacccttaatacttccct -ttcgccgaatactggcgtggaaaggttttaaaagtcgaagtagttagaggcatctctcgc -tcataaataggtagactactcgcaatccaatgtgactatgtaatactgggaacatcagtc -cgcgatgcagcgtgtttatcaaccgtccccactcgcctggggagacatgagaccaccccc -gtggggattattagtccgcagtaatcgactcttgacaatccttttcgattatgtcatagc -aatttacgacagttcagcgaagtgactactcggcgaaatggtattactaaagcattcgaa -cccacatgaatgtgattcttggcaatttctaatccactaaagcttttccgttgaatctgg -ttgtagatatttatataagttcactaattaagatcacggtagtatattgatagtgatgtc -tttgcaagaggttggccgaggaatttacggattctctattgatacaatttgtctggctta -taactcttaaggctgaaccaggcgtttttagacgacttgatcagctgttagaatggtttg 
-gactccctctttcatgtcagtaacatttcagccgttattgttacgatatgcttgaacaat -attgatctaccacacacccatagtatattttataggtcatgctgttacctacgagcatgg -tattccacttcccattcaatgagtattcaacatcactagcctcagagatgatgacccacc -tctaataacgtcacgttgcggccatgtgaaacctgaacttgagtagacgatatcaagcgc -tttaaattgcatataacatttgagggtaaagctaagcggatgctttatataatcaatact -caataataagatttgattgcattttagagttatgacacgacatagttcactaacgagtta -ctattcccagatctagactgaagtactgatcgagacgatccttacgtcgatgatcgttag -ttatcgacttaggtcgggtctctagcggtattggtacttaaccggacactatactaataa -cccatgatcaaagcataacagaatacagacgataatttcgccaacatatatgtacagacc -ccaagcatgagaagctcattgaaagctatcattgaagtcccgctcacaatgtgtcttttc -cagacggtttaactggttcccgggagtcctggagtttcgacttacataaatggaaacaat -gtattttgctaatttatctatagcgtcatttggaccaatacagaatattatgttgcctag -taatccactataacccgcaagtgctgatagaaaatttttagacgatttataaatgcccca -agtatccctcccgtgaatcctccgttatactaattagtattcgttcatacgtataccgcg -catatatgaacatttggcgataaggcgcgtgaattgttacgtgacagagatagcagtttc -ttgtgatatggttaacagacgtacatgaagggaaactttatatctatagtgatgcttccg -tagaaataccgccactggtctgccaatgatgaagtatgtagctttaggtttgtactatga -ggctttcgtttgtttgcagagtataacagttgcgagtgaaaaaccgacgaatttatacta -atacgctttcactattggctacaaaatagggaagagtttcaatcatgagagggagtatat -ggatgctttgtagctaaaggtagaacgtatgtatatgctgccgttcattcttgaaagata -cataagcgataagttacgacaattataagcaacatccctaccttcgtaacgatttcactg -ttactgcgcttgaaatacactatggggctattggcggagagaagcagatcgcgccgagca -tatacgagacctataatgttgatgatagagaaggcgtctgaattgatacatcgaagtaca -ctttctttcgtagtatctctcgtcctctttctatctccggacacaagaattaagttatat -atatagagtcttaccaatcatgttgaatcctgattctcagagttctttggcgggccttgt -gatgactgagaaacaatgcaatattgctccaaatttcctaagcaaattctcggttatgtt -atgttatcagcaaagcgttacgttatgttatttaaatctggaatgacggagcgaagttct -tatgtcggtgtgggaataattcttttgaagacagcactccttaaataatatcgctccgtg -tttgtatttatcgaatgggtctgtaaccttgcacaagcaaatcggtggtgtatatatcgg -ataacaattaatacgatgttcatagtgacagtatactgatcgagtcctctaaagtcaatt -acctcacttaacaatctcattgatgttgtgtcattcccggtatcgcccgtagtatgtgct -ctgattgaccgagtgtgaaccaaggaacatctactaatgcctttgttaggtaagatctct -ctgaattccttcgtgccaacttaaaacattatcaaaatttcttctacttggattaactac -ttttacgagcatggcaaattcccctgtggaagacggttcattattatcggaaaccttata -gaaattgcgtgttgactgaaattagatttttattgtaagagttgcatctttgcgattcct -ctggtctagcttccaatgaacagtcctcccttctattcgacatcgggtccttcgtacatg -tctttgcgatgtaataattaggttcggagtgtggccttaatgggtgcaactaggaataca -acgcaaatttgctgacatgatagcaaatcggtatgccggcaccaaaacgtgctccttgct -tagcttgtgaatgagactcagtagttaaataaatccatatctgcaatcgattccacaggt -attgtccactatctttgaactactctaagagatacaagcttagctgagaccgaggtgtat -atgactacgctgatatctgtaaggtaccaatgcaggcaaagtatgcgagaagctaatacc -ggctgtttccagctttataagattaaaatttggctgtcctggcggcctcagaattgttct -atcgtaatcagttggttcattaattagctaagtacgaggtacaacttatctgtcccagaa -cagctccacaagtttttttacagccgaaacccctgtgtgaatcttaatatccaagcgcgt -tatctgattagagtttacaactcagtattttatcagtacgttttgtttccaacattaccc -ggtatgacaaaatgacgccacgtgtcgaataatggtctgaccaatgtaggaagtgaaaag -ataaatattgcctacacatactgaattcaggcaatgcgttttattcgaaaggtcatataa -ctagaaaacatgatgaattcttatcggatccttttactagcatagtgttggcgaacacct -cgtaatgctcagcggcaaattggactgcgggtccttatcatacattttttttcaatatag -gcgattggtctaggttagtgattccccaacacttaaggtttgctgacattcataccctca -gcaacttcctctcaaaaattagagtgagttggtggtcttataagaccgttgattatttga -ggtggtcaaatgatggtgcgatgcacaaatcgttataatcgtactctgtagacaataacc -cattgtagtgccgattttgtgcataatacaagaaggaggatataaaaatgacttttcaat -aatattggctattagcaacaagaaggagaatcctcattaagttagcaaccgcagggggta -ctgcagtccaaggaggtttcattggagagagcagtatgaaaacggcaattatgattgtga -gattcgctgaagattgtgtctctgattttcctagatagaataagctatagctacttaatc -aactcttaactgtggagactatcctgatgatctgaataccccatttacaaaattccatat 
-caatgaggctaacgcttaaatttcatttctccatcgtaacaaaaatcagcctttttatac -aagacaaaacactgcttccattacgggtagcaatggttgctcgactactggtagcgtcgt -gatgtggtgataaagctgtcttgcgtttatacttaaacaaattttgacctgacataatgg -agcgacttatcggatgttgccgatctttagggtcatctattaagcttatacgaaaaaggg -acaagcacgttacgtaatctggtaggactgggtacctagaaacgcaagaggaggcgaact -ccaatatctgtaagaacagaaaaatacaggagtccttttcatttttcaagttaacaatat -aagtaggagcttagagaggcttgcatgaaaatcgttaggaattacagaataggcagagag -tggggcgtgtagactacattcttcaggccccacaatatgggttataggttaaactgcact -ttttgcgatctcccgaaatactgtcgttctctgcgaaccacgctcgttccttttgctgta -gtccacgttcatccaactattcagataaacaagatcgcagaattaaagcttaaccatatc -ttgatagcccatcgtgtatggggcatgtatgtgcaaacaaaagacctcaatcttgtctgc -gagagggaggaaaatttagacaaacataattcattctttcgactggacacgctaaggttt -ggacaaactttgtatctatatctggaggcctgtattccagcccttcttttaataagattt -acggcttaaactatggatatttgccaggaaatgacactgctattgacaggaacataattt -tgattcaaacctcattgttaattattttatatctcctgtttttatatcagaatgcttctg -tcctagaaggcatactcaaggtgagggctcgaggaatgaatcataatagaccggccccta -ttaatattggttcaattctttcttacataacgcggaatttgattgcacgaacaccgggaa -cacataaccgtatagcgcccgttatgctagtgcctagcgactgggaccgtggagtctata -tcgtctttctaccattattaatctaaggatataccactttaagtcctttcaactaacata -aggcgcattccatgcgctaaggaccttgaatttattatttcttacatgataaaagatcga -gtcgacgggaacaaaaggctacgtactcaataaagtgcagtttactaagagccctttttc -tggcttgtggagactatcataacatgaagatgttttgacattcaatagtttgcaaaacaa -acttactttgtgtagtattgaacgagatctttccaattgccccatagcaggaatagttat -atattgcagatcgcggtgtaacgcactccaaatccatcgcggtgtgtgagggtaagcgac -ttaaagaattacggtttttgatcaaagcacagtgagagttgagcaaattacagttatacg -acttaattcagtctccataaattgaaacgacacttcttaacgggaggaccagacacgttc -attaagtgaggagtgcactttttgactttaaaaacatggtaatcaatttaaaccacttga -tatgtatatgaacagatttgaagttatttctgttttaatacactgggagttctgtcaata -tcgcaggaaccgcctgacgtcccctatcacacctcagagggtaaagggacaggggaaagg -gtaatcgaggggtagggaacgtagttggcacacccaatggacgaataaatgctgccatat -ccacggagggcgggattgcggttgattttaaggcgatggtaacctgaatgtaatagatca -tcaaatgcctcctccactggaaattactgcgtacatccgctgagaattgcaatggagtgt -ctcggtttttctttaaacaaaaccaaattgacaacttcatagtataatttttgcacatta -caagcgttaattaacaaacttactttgctgttagctgcctatatttgtccgacaatataa -ctggatatctctgcgagaactgtaaattaacggcacttggaacataatagttcctattgg -taacgacgttgtaggcggcaattatccggtggaagaattgacaactgcagttgaactgca -tgaaagtcaaatctctcgtaagtataactttagaagactccaaggtacccagaacctctt -cagcggacacgatcgctatcaatcaataaggattattcactgaaaccgctcatatctgga -ggtggacgtttttcttcgaaaagcttgtcaaaggactcatcaaatttttggccgtgctaa -tcgacacacctgttattttcatgaccggataggacatctcgcggaaattcgggtaacagc -tgggtagatataggacctcccctacgtattaatgataagcctgtcataactagcttggtt -taccgaagagacaataaacattcgagcgctcgtgccaaactcggtgcattacgtttgaat -aaatcggtaacatgtactattactctgcctaacggcacttacccgtttgggtccatgggg -taaccgctcgatgttgacagaattatgctaaagtcgtttaagatcccgattaccgaaaat -ctggttatgtctgagcattcgtacactgcgtattaagatcaggttgaacaggttcctaac -aaattttgtgacctaaagtgaaactaggtcgtactctgggcatgttttatgtcgtggcgt -atgcatgtgctgacacttctaaaaccaaattaaggctttatccaatatgggtccttaagt -gctaaacatcattcacaatttcaagacagattgttggtcttgtcgattccgcatctgtcg -ccaaattgacacatcgtaaaccaggtacatcggtaattatatgttgactaaactaccgtg -tgtattctggctctaggtacggcgaacaagtacgatgtgcttaagaagccctcaccccag -acgagcccgcgtaggtcacatcagcagatcctaagtaattccgttttattgtcctgaggg -agtaggatcgacgaactctacaagtcgctttgtcgtgccttataggctatttcgggtcaa -tgtagcgtcaaatgaactattgtcatctgtacgagttaactaagtgtctatcgccaacta -aaagacgtctcgatggttctttatgcggacctgtcatatcattgactggcacttgcttac -atccaaataacacgtttgttagcggatagtcgttaagtgtgcgcaagatcatgaggcggg -gggggtaatatttcgccctctacatgataaatgaataagtaagaagatgatctttttgtg -gcggtaccttaagcgtactcctgtcgacgagttactactaaaggaatgtagggttctgga 
-tctatgaaaagcgacctccatatatatacgggcctaagcggagtaaaataagtgatcaat -ggactaacattgaaatgttagtattgtcgaccattgagggctggtaaatcttatttacgg -gcgtgggaaaacgaacgtgatatggtttagcatgggatgcaagcactcgttaatgcttac -tttagttggttgcgggaacaacaggaggctatactaactggtagcgttcttgcttccatt -atgttattattataattaaaaataagacatatggtagagttgtagtcagggtggatcggg -ttgtctataacgttggaataatcaaaactatcgttaacaaaaacgaaatttaagtcggtg -cggtggaatgcgcctacctcatgtgcaccacacattcacagcacacccctcattataggc -aaggaagcaaacaaaaaaaagttaatcgaccgtatccgaccttaaattttaaaataaata -gaaacacttagcggtaatgaaaagataggactaaaattcactagtatcctggaacgaggc -aacagagttatctagatggtaacgaggtgctgcatcaagatgtatgatttttggtccgct -gtgtggaatacctctattgatatacaagtgactttctcggtaataacgcacttcacaatg -tgttgtttcttttctatgtattttgcaagagaaagaagcttagtataggtacacctcaga -gatgtttcgtgtaaatcgtatcacatggtataactgcaggaggaacattatccaaattca -ccacaattactaatccacccttttacttttactaaagatatattaattctcatgttgtct -gaattgtataacccggtaccctgggagcgtatcgaaggataccaattgaagtcctcgagg -catgttacaacacacgacttccttccgtctattcagacactcaacgagactaacttttcc -taggtaatcaatgatattgggtaactcgtggcatcttatagttattgatccggctctttt -gtagatcctgtgcgactcgtgcgctaattaagactggctctcttgcgcaggggatacgtt -tattctacgtacccgatttggttactactaagcggcctttcttcaaacttgcagttgtga -cttacattcctatttcttcaaagcagggaagggttacagggagagacttattgagatacg -attggaatttccatgtacaatcgttaatacgcttgtagaccagcaactcagtatagagat -ccgtttcctaaagggtgagcggtaggggcaaggcaataagaaattactaaaaccctagtt -gttaatataagaacgattcgaaacaataggattgcccaagggggtgcgaacatggtgtaa -atcaaagagaaataggcattgttaaaacccgcacgtttctagtacgcaagaggaacgtcg -gtaaccagttctcaaagatcctaacctaaaaggggcttattctactttttccgacactca -atggacgagacaaacatgaacggatagctttaggtctcgttgaatgcaaagaatagaatc -gttattattaatcggtttccattatctatatgcggtatagatctccgagaggaccctgta -aactagctctgcggtttaactggtgctaatagaccgccactatgttattgcttctagctc -ctagcgtcttatcatgttatacattaatgtcgcatattggacagtagccaggcttggatg -gatcgccgacaaaaagaaaagactttccctgtaaggacttaactattacatataacttgg -atcattaatctgcaaattagagtaacggtctttcaccagcttcatattccaacgtggcgc -tagtcgatatcccatgaagtttaaaactagaattggcagtctcacttcacagtgcgtatc -tatacgacaaaagtggtcgatttgcataaatatcttatcgatattcaggttattaccgat -tccttgctaacgctagaagtcacaccagagtaataataattccagacacctgtgaaataa -tcggtcactacggatagactagtaacgataatacgtatagtccataaaagttgaatttta -ggggctaaagatattagcaatactggtctagcctaatcgtcgatagcaaagggctgtgag -gatttctcctacattttcgaccaattgtatcgataggaatagttacagtcacgcttgtag -atgtaagagatgacgttattcttagggttcttaagtcggggggtaatttaagaccactag -taaaggtagaggcgtacacagtaaacgatattttgaaatcgtcaaaaaaaagtttacaac -atcctttaagttagcaactgattttagtggcaaccttaacggttgaattgatctactaat -acaggcctacaccgaagggtacagataatgattcttactaccctaacatgatagagtcct -gtcctatctcataggtcgacattttaaattcgtaatgagcaacgaagatcgtttcccaat -ttgcaacattcacttatagacttcaggttatttcgtgctaacattaagatagaatataat -cagtcgttaagaaactattatccagctttcgtcaaccataaagattaaaaactgaaactt -ggcaagatatgaatagctatcctgctttaaccgatcgtatgagatgctttgtagcaagaa -aagtgactagcacttgtgtttagtaaagcgggagagtgcggtaattaatattaatatact -attaagctacacagcaaaggctgcaataatgttagtaagtagaacataaaggtattctcc -acaagtaataaatagtgtgagctaattgactaacttaactctcgcgacaagtgatgtgga -taagatgactcatatcgtctttttctgtagtgccgacatcccacctggatcgaacaattc -cttctagttatcgactttgattacctatcctattaaacagatagggttgtaaagtcagaa -aatgatcggcttgcgttggtctaccatagctagagttagaacgcgtagatagaggccttt -tgttgccaacgtgggggtgggatgagtctgggcgagcgtgactttctttcgtgtccgaat -ttgtttaacatccattagattagatgtttgtgttttgggtctgatgtcctaactactttc -tcagtgaaactaatgtcatcatccaagtaaaatagtccgatgaagtctccgttttcggcc -gaagcttgtctataacgtatataaagtcgctgaatttagaacacaccttatctatgttgt -aaagttactttattccaaaggacgtgcacgaagcgtgagtgtgggaaggaacttaaagtc -ggatcactcttgtcagtgtagataagaatttctttcatacttcactggaatccggcgtat 
-ggatatctctaccgcgtcatctggtggtgtctgcggtaaaaagtcttgctgcacgagtct -gagaaatttttggtgccatcacatcgtaactgtacaacgaacaaatagcatcaggccttc -ttatccagcgtgaagtctaattatttcacaagctttcctaagtatgtaaatccctcactt -aatgatgcttgcgccaatgaggatagaggacattgcatgtacgtaggactattctccaag -gggtcttctattttgttagcgaaaattgttacagcctaatgttagagcggcgtacgactt -tataccagatactttcattagatatgcaaatatccaattaaatcatagtagtatcgtggt -atggacaatcaaaaaagacccgttgtgatatgatgtttttctagttcgttctcatatata -tagatcaacaatgaataatctcatgatctataaccgatgtatatttatattccggttgac -tgctccggtgcaattcactacggacactaatgactaatatggcgcctttcatcagaaacg -ctaaatatgattaatgaattaagggagtattatctaattattagagagtagcagttagtc -tgatattttcggtgtatgtgttagccgttataatgctgtctttttatcagtgagaacagg -gagtgtgtagtgttgtatgcttcactttatgactctggttatatccctcggagaacaaga -ataagagtacgagaagttcggtcattgaggatgaaatagaaccgctagacgaatggactc -acgtttataaaactatgtatcacagtactacagctaactctgaagtccgagaagcttttg -taggacaaaacgttataagtacctttcgcagaatacggccgtgcatacctgttataaggc -gtagtagggacaccatgctatccctcatatagagctacactaataccattacatggtgac -tatcgtttacggccatcatctgtaagcgatcatgcctcgttagcatccgtacaatctcgc -atggcgtcactgcagaaaaaccccgtgcggattttgagtcagaactattcgaagcttctc -aatccttttccattatggcatagcaagtgacgactcgtcagccatgggaataatagcact -aatccgattacttatgaattagaacccacatgaatgtgattctgcgaattgtctaagaat -ctaatgattttccggtgaatatggttgttgttatttattgaacttatattattaacatca -cccttcgttagtgatagtcagctatttccaagaggttccccgagcatttttaccattctc -tagtcatacaagttggagcgcttttaaatctttaggctgatcaaggcgttttgtctagaa -ttctgcagatgttagattcgtgtgcaatccctcttgcatgtcagtaacaggtcacccgtt -tttcgttacatatgctggtaaaatattcatagtaataactacaatacttgatttgttacg -taatgctcgtacataacacaatcgtattccacggaacagtaaagctctattattctgatc -gagcctaagagaggatcacactacgctattaaagtcacgttcacgaaatctcaaacctca -actgctggtgaccagttatagacagtgtaattccatattacatgtcaggcttaagctaac -ccgagcctttatataagctataatcaagaattagattggagtgcattttagacttatcta -tcgaaatagtgatagtaagagtttatatgacctgatctagactgatgttctcttccacaa -cagccttaaggcgtggagcctttcttatactattaggtcgcgtcgagagccctattcgta -atgttaacgacactagactaatatacaatgagctaagaataacacaagtcacaagataat -ttacaaatcatatatctacagtccacaaccatcactagcgattgcaaagcgttattggta -ctaccgctctaaatcggtatgtgcaagacgcgttaactggttcaagcctctcctgctcgt -gagactgaaagaaatcgaaaatatggatgtgcctaattgttcttgtgagtcatgtgcaac -tatacagtttagtttggtcaagactatgcaactattaacagcatgtgcgcattgaatatt -tggtgtcgattgataaatgccccaacgttccatcacgtctataagccgtgttactaatgt -gtattagtgcatacctattcagaccatagttcaactgttggactgaaggcccgtcttggg -gttcgtgaatgagagtgcagtttcttgtcttttccttaactgacctaaatgaaggcaatc -ggtttatctagagtcatgcttaaggtgaatttcagccaatgggctcccattgagctagta -tggtgctttacctttgtaagtggtggctttccttggtgtgctgactttaacacggcagag -tgattatccgaagaatggataataagacgctggcaatattggctaataaagtccgatgag -tttcaatcatgactgcgaggagatccatgcggtgtacctaaacctacatcgtatgtattt -gctgacgttcattcttgatacataaagatccgatatcggtccactttgtttaccaaaagc -cctaccttcgtaacgatggaaatgtgaatgagagtgaaatacacgatggggatattgccg -gtgagtacaagttagaccacacattagaactgacctatattcgtcatcatagagatggag -tatgaattgattctgcgaagtacactggctttacgagtatctagacgccgcggtatatct -cccgtcaatactatgaaggtatatatatagaggctgaaaattcatgttcaatcctctttc -taagagtgagtgggagccccttctgttgtcggagtaaaaaggcattattcctcaaattgt -cagaagcaaagtatacgtgatgtttgcttagaacaaaagagttaccttagggtaggtaaa -tctcgattcaccgagagaagtgattttggcggtgtgcgattaattcttttgatgacagat -ctcattattttatatagctccctctttgtatttagagtttgcgtaggtaacctggcaaaa -ccatatcccggggggagagtgcgctgaacattttatacgatgtgattactcaaaggataa -ggttcgaggcctctatactcatggaactatcttataattataatggatcgtggctcattc -cacctatccaaacttctttgtgatctgatgctacgagtgtgaacaaacgtacatcttcta -aggaatttgggacgtttcatagctcgcatttcattcctgaaaacttaaatatttttaaaa -attgattctactgcgaggaactaaggtgtagacaagcccttagtaaccggtggatgtcgc 
-ttcagttttatagcaaacattattcaatttcagtcttgactgaaattagtttgttagtgt -tagaggtccatatgtcacatgcatatggtctagatgccattgtacagtaataccttagat -tagtattagcggcatgcgtacttggatttcacttgtaagaatgagcttaggacggtcgcc -tgtagggctgcaaataggaatacttacaatttttgatgacttgttagcatatcgctatca -cccataaaaaacctgatacttgatgagcgggtgattgagactatgtactgatataattca -atagctccaatagatgaaacagctatgcgcctatttatgtcaaataatcgatgtgataca -agcttagagctgaacgagcgcgagtggaattagcggtgatctctatcctaaaaagccacg -aaatcgatcccagaagctaatacccgaggtgtcaagcttgagttcagttaaatttgcatc -tcatgccccacgaagaatgggtagagagtttgaaggtgcttctggattttcctaagtacg -tggtaaaaatttgatgtaaatgaacacctcctaatggttgtgttaaccacaaacccctgg -gtgaatctgattagccaacccagtgatctgatttcagttgtcaaatctcttttttataac -taccttttgtttccataatttaaccggatctcataatgaacaaacgggtagaataatggt -agcacatagcgagcttgtctattcagaaatatggcctactcagaatgtattctccaaatc -agtgttatgcgaaacgtaattttacgtgtaataatgatgatttcttatcggttccttgta -ctacaatactcttgcccaacaaatactaagcataacagcaaaattcgaatccccctcctt -ttaataaatggtttttcaatatagccgattcgtattcgttagtctttcaccaactattaa -cctggcatctaattaataaaatcaccaaaggactctataatatgacagtcacttcggcct -cttttaagacagttgattattgcaggtccgcaattgatggtgacatgcacaattagttag -aatccgactatggagacaattaacaattgtagtgcccatttggtccagttgacttcaacc -acgagttataaaggtattttaatttatagtcgatagtaccaacaacaagcacaatcataa -ttatgttagaaaacccagggggtaatgctctaaatccagctttaaggccagagtgcacta -tgaaatcgccattgatcattgtgtcattcgctgaacttggtgtctaggaggtgccgagtg -agaatatcagataccttatgaagcaacgattatatctggactagatcatgatgatcggaa -taaaacattgaaataagtccttatcaaggagcataaacattttatttaatttatacttcg -taaataaattcagaattttttttcaagacattaatctgagtaaatgacggctagaaaggg -ttcctactcgaatcgtagcctacgcatgtgggcagtaacctggcttgcgtttttactgaa -acaaaggttcaccggaaagaaggctgccacttttagcttcttgacgatctttagcgtcat -atttttagattagtcgaaaaacggaaaacaaacttaacgaagctggttgcacggggtacc -gagaaaccaaagagcaggacaactccttgatcgggaagaactgaaatagacagctgtcat -tttcattggtcaacttatcaatataacgaccaccgtagtgacgcttgcatgaaaatactg -aggatgtaaactatagccagtcaggcccgcgtgttgactaattgatgaagcaaacaaaat -agccggtattcgttaaaaggaacgggttgccagctacagatatactctaggtatatccca -aacaagagacgtcctttggctgttgtaatcggtcataatacttgtcacataaacaagatc -gctgaattaaacattaaacagttagtgatacacaatcgtggttggggctgggatgtgcaa -taaaaagtcatctatcgtctatcacagagcgacgtaaatttagacaaacattattatttc -ttgacaatggaatcgataagcgttcctctaacttggtatatatatctcgaccccgggatt -ccagccattcttgtatgaagatttaaccatttaactatgcatagttgaatggtaaggaaa -atgatattgactgcaacagattttggatgcaaaaatatttgtgaattattggttatatac -tggttgtatagcacaatcattaggtcctagaaggcatactcaacctcagcgagagagcta -gcatgcataattgtaccgcccatattaatattcctgaaatgatttcttacattacgccca -atttcagtcatcgaacacccccatcaatttacccgatagagaacgtgatcatacgcaata -ccctatgcgaacgtccactctatagcgtctgtatacaatgattattcgttccatttacaa -cgttaagtaatttaaacttacataaggacaaggaaatccgcgaacctcctggaatgtatg -agttatttatgcagttaacttcgtctcgaccggaactaaaggcgtcgtacgaatgaaagg -ccacttttagaagagacctttgtatccattgtggagaatatcataaattcaagatggggt -gtcatgctattcggtcctaaacattcttaatggctgttctattgttagtctgatttaaaa -tggaaccatagcacgaatagttagatagggctcatacccctgtaacgatctacaaatcct -tccccgggtgtgtgcgttagcgacggaaagttttacggtttgtgatcaaagaacactcac -acgtcagattattacactgatacgaattatttcagtcgacagtaattgaatagaaactta -ttaacgccagcacctgacacggtaagtaaggcaggtctgaactgtttgactgtaaaaaaa -tggtaatatttttaaaaatcttgatttctatatcaaatgatgtgtagttttttctctgtt -attaaaatcccagtgcgcgaaatttagatcgttacgactcacgtacaagatcacacatca -cacgcgttagcgaaagcggaatggctaatacagccctacgcaacgtagtgggatcaacat -atggacgaatttatgctcaatgagccaacctcccccgcattgcggttcattttaaggcct -gggtaacatctatcgtttagataatcaaaggaatccgactatgcaattgtctgacttcat -ccgctctcaagtccaatgcaggcgctacgtgtttctttaatcaataccatattgaaatcg -taatacgataattgttgctattgactacaggttatgaaaaaacttactttgcgggtacat 
-gcatatttttgtaccacattattacgcgatatctctcagtgtactctaaattaaaccctc -ttcgaacattttagttcctattcgtaaacacgtgctacgcggcaatttgccggtcgtaga -atggacaactccagttcaactgcatgtaactcatagctcgcgttagtataaattgactag -tagccatgggacaaagtaactagtcagcggaaaagatccctttaaagatatatgcaggtt -gcaagcataaagctcattgctcgaggtgcaccgtggtattccaaaagcgtctctatcgta -tcttctaattttgggccgtgagaatcgaaactactctgatttgctgcacacgttaggtaa -tatcgcccattttcccgtataagctccgtacttatacgaactacacgaccttttaagcat -tagccgctcatatcgtgattcgtgtacagatgagtctattaaaattacagacatactcca -tatctcgctccttgaactttgaataatgcgctaacttgtactatgaataggcagaaccca -actttcccgtttgcgtcaagcggggaaacgatacatgttgtcagatttatgattatctag -ttttagatcacgtttaccgataatcggctgtggtctgagcagtcctacactgagtattta -cttcagcttcatatcggtccgaaaaaaggttgtgaccgaatgtcaaaatacggagtacga -tgggcatcttttttcgagtcgcggttgcagggcagcaaaaggcttaaaccatttttacga -tttttactatagcggtcatgaagtgcgaaactgcttgcaaattttctacacacattgtgg -ctcttgtccttgaagcttatggcgaaaatttgaaacatagtataccagggaaagcgcgaa -ttatttggtgactaatagtccgtgggtttgagccatatacctaacgccataaactacgtg -gtgctttagatgcaatctaaacagaacagaaagcgtagcgctcatcagcacagactaact -ttttcagtttgagtcgccggagggacttcgagacaagaacgcgtcaagtcgcttgcgcgg -cacggattcgattgggcggctcaatcttgcctaatttctactattgtcagctgtacgact -gtactaagtgtatagccccaaataaaagaagtatcgatgcgtctttatgaccaaaggtct -tataattgaagcgcacttccgttcatcaaattaaatcctggcttacccgattctccggaa -gtctgacctagagattgacgacggccgcgtattattgagacctcttcaggattaatcaat -aacgaagtagttgatctgtttggcgacgtaccttaagccgactccgctacacgagtttct -actaaaccaatgtagccttatgcttagatgaataccgtcctaattagatattccggcata -acagcagtaaattatctgttcaatggacgaacattgaattgttagtattctacacaagtc -aggcctcgtaaatattaggtaaggccgtgggataacctacgtgatatgcttgagcttgcg -ttgcaagctctcgttaatcattaatttaggtgcgtgagggttaaacaccagcatattcta -tatgctagacgtcttccttaaaggatcgtagtattataattaataataagaaatatggtt -gacgtctagtcagcgggcatacgctgctctatatactggcattattcaaaacttgacggt -aaaaaaacgaattttaaggcgctcacgtcgaatgagccgaactcatgggaaccaaaatgt -cacagaaaacacctctttattgccaagcatgcaataaaaaaaatgttaatagtacgttta -cgacattttattttataataaagagaaactattacacctattgatatgataggacgtaaa -ttaacgagtagcctgcatagaggcaaatgaggtttctacatggtatagacctgatgctga -aacatcgatgagttttggtcccctcgctcgttgaaatctagtcatttactactgtctttc -gagctattataccacttcactatgtggtgtttctttgctatgtatggggctagtcaaaca -tgatgactatagctacaactcagagagcgggcgtgttaagagtatctcatgctagaactg -cacgacgaacttgatacaaagtaacaacatttacgattccacaaggtgactttgaagaaa -catagtttaattctctgcttcgatcatttctataaaccggtaccatcgcagcggatagat -gcataacatttctactactccaggcatcttaaaacacacgtagtacttcactagattaag -acacgataagtgtataacttggcagtgggaagcaaggagattggcgaactcctggcatct -gttacgttttgttcaggctcggttgttgataatgtccgactcctgccatattgaagactc -gctcgagggagatcgggattcgttgattataagtacacgtgttccgtaatactatgaggc -agtgattcaaaatggcacttctgacttacatgactaggtattattaccacggaagcgtta -aaggcacactcttatggacttaagattgcaagtgccttcttctagcctgaattcgcgggt -tcaacacaaactctctttagacatccgttgcctaaaggctgagacgtaggggcaaccctt -taactatgtactaaaaaactagttggtaatttaacaacgtgtccaatcaagacgatgcac -caacgcggtgcgaaaatcgggttaagcaaacacaaataggaattgtgataaaccccacct -tgagaggtcgcaagaccaacctcgggaacaacggctctaagagaataacctaaatccgga -tgagtagactgtgtaactctctaaagggaagtgaaaaaaagctaagcatacatttaggtc -tcctgcattgcattcaattgaatcgtttgtattatgagctgtacagtagctatatcagct -atagttatcccagaggaacaggtaaactagctctgagcgtgaaatccggatattagaacc -cctagatgggattgattctagctaatacaggcttatctggttttacagttatctagatga -ttggtaaggtgaaacgcttggtgccttccaccacttaaacaaaagtattgcccgggaagc -tattttctaggtattataaagtcgagcattaatatcaatttgacagtaaaggtctttcac -cagcttcatatgccatagggcccatactcgatttaaattgaacggtttaacgagtattgg -aactctcacttataactgagtagctatacgaaaaatctggtccatttccagaaatttatt -atcgatttgctgcttagtacccaggaagtgataacccttgaaggcacaacactgtaataa 
-gttttcctgtcacatctgtaatattcggtcactacgcattcacgactaaagataattact -atactaattaaaagttcaatgttagggccgaatcatagtagaaattctcgtctagcctaa -tcggacttacctatgggctgtgaggatttatcagtatgtggacaaaaatgctagagatag -gtatagttaaagtcaccatggtacatctatgtgaggaagtttgtagttcgcttctttagt -ccgggcgtttgggatgacaactactatacgtagagccgtactcaggattagatagtgtga -aagagtcaaataaaagggttaatattaatttaacgttgcaaatgtgtttaggccaaacat -taaccgttgtagggatattctaatacaggccttcaccgaaccctaatgataatctgtctt -aataacattaaatgattgtctccgctacgagctcttagggcctcattttaaatgactaat -gtccaaagaagagactttcccaatttcaatctgtcacgtatagacggcaccttagtgagt -catatcattaagatagaagattatcaggagggaagtttctattatcaaccgttacgcaac -cataaacttttaaatctcataatggcattgagatcaagagctttcatgatggtaaagttc -gtatgtgatgctggggagctagatatcggtataccacttcggttgtggtaagcccgagtg -ggccgttagtaatattaatagacgattatccgacaatgcattcgctgaaataatcttact -taggagaaattaatgctatgagccaaaactatttatgtctgtcacattattgactaaagt -atctatcgacaaaactgatgtccataagttgtagcagatagtcggtgtatggtgtcacca -atgaaaacctcgagcgaaaaatgaattatagttatccaatttgagtaaattgcctattat -acagataggcttgtttagtcagataaggttccgcttgaggtgctctaacttagcgagagt -tagaaagcctagtgagaggcattttggtgccaaactccggctcgcatgagtaggccagag -agtcactttctttcgtcgaagaagttggtgaacagccttttgattagttgtttgtcttgt -ggctatgtgctactatataagttagaacgcaaactaatctaatcagcaaagtaaaatagg -accttgaacgagacggggtacgccgttgaggctcgagatagtagataaactagaggaatg -tagataaaacattagctagggggtttagttactggattacataggaagtgcaccatcacg -gtgtgggggttcgtacgtaaagtcgcatcaatattgtcagtggacttaacaagttcgtgc -ataatgaaatcctatacggactttgcatatctctaccgactcatctggtcgtctatgcgg -gtaattgtattgctccaagtggatgactattttggcgtcccagcacatagtaaatgtaaa -tccttataatagcataagcaattattagactgcgtgaagtcttagtagttctcaagcttt -acgttgtatgtaaataactcacgtaatcagccgtccccaaatcaccattgaggtcattga -atgtacggagcactattatcaatgcggtatgcgattttctgagcgattattgttaaagac -ttagcgttgagccccggaacacttgattacagattctttaaggagttatccaaatatcat -tttaaataatagtagtatcgtgctttggacaataaaaaaagacccgttctcttatgttgt -tttgcgacgtacttctctgatatatacttcaactatgaagattctattcatcgataaccc -aggtatatttatatgcccgttcactgcgcagggcaaattatctacggacaataatgacgt -agttggacccggtaagaactaacgcttaatatgattaaggatgtatgccagtattatctt -attatgtcagagtagaagtttctctgagattttccgtcgttgtggtacaccggatttggc -tctctttttagaactgagaactcggagtgtgtagtcttgtttccttcaatttatcaatat -gcttttataccgccctcatcaactataacaggacgacaagttccgtcttgctccatcata -tactaccgatacaccaatcgtatcaagtttagtatacttgctttctctcttctacagctt -actcgcttgtccgagaagcggttggtgctcataaagttagtagtaaatgtacaactagta -gccagtccttacctgtttttacgactactacggacaccatgagatacagaagttagtgct -acaattataccattacatgctcaatatcgttgtcggccataagatcgaagagtgcatcac -gcgtgtgaatacgtaaaatctaccatcccgtcaatgcacaaaaacacactccccttgttg -actaacatcttttacaagaggctaaatcattgtccaggatcgaataccttgtgtacaatc -gtcacccatcggaagaataccacttttccgatgtagtatgatttacaaaaaacatctatg -tgagtaggccaattgtagtagaatatattcatttgaccgtcattagccttcttcttaggt -tgtgtacggatagtaggtacataaaccgtcgtgtggcatacgctgcgatttcatacagct -gccaacaccttttttaccaggctagagtcagaaaagttggagccatgttaaatagttacc -atcataaaccactgttgtctactagtctgatcagctttcatgcctgtgcaagcaatatgg -attctcacgtaatggtaacaactgttgcgttacttaggctggttaatttgtcagagtaat -aaatacatgtcttgttgtgtttcctaatcctcggaaagtacacaagcctaggaataggaa -aagtaaagctcttttattctgatagtgactaactcaggatctaaatacgcgattatacta -accttcaccaaagctcaaaaatcatctgctggtgaccagttatagacagggtaattcaat -atttaatgtctcccttaacatttcaccagcatggattgaagatagtataaagttttacat -ggcagtcattgtgtcacggttctatacaaattctgatagttagacggtatttgaaatgtg -cttctagcatggtatcttacacaactgaatgaacgactggagccgttcgtatactatttg -cgagcctcgagaccccgtttcctaatgttaacgaatatagtataatataaattgtgatat -gaataacacaagtaactacagtttggacaattaattgttctaaactaaaaatcattcact -tcagatggcatagagttatggctactacacatataaagcggtatgtgaaacacccgtttt 
-agccggaaaccctctactgctcgggacaatgaatgatttccaaaatatggatgtgcagaa -ttgttagtgtgactcaggtccaaatagacactttagtttcgtcaagtcgttgcaaagttt -aaaaccatcgcagcattctttatttggtctacattgagaaatgaaaaaacgtgacagaaa -gtctagaagaactgtgaataatgtctattactgattaactagtaagacattagtgcatct -ggtccactgaagcacccgcttggcgttaggcaatctctgtgaactgtcgtggctgttccg -gtaatgtacgaaagcaagcctataggttgatcgagtcgcttcattaaggtcaatttcaca -atatccgatcacattgtgctaggttcgtcctttaccttgcttagtgctgcatgtacgggg -tgtcatgacttgttatcggcagactctttatcccaagaatggataatatgtacatggaaa -gtgtccataattaagtcccttcactgtaaagaatgactgccacgtgatccatgaggtcta -cagaaaccgacttacttgctttttgatcaacttaattatggattcataaagttcagatat -cggtacaattggtgtacaatatgaaattaatgaggaaacatggaaatctgaatgacagtg -atagaaaagatccccatttgcccggtcagttcatgttacaccactcattagtactgtaag -tgtttcgtcagcattgagatccacgatcatgtgtttatgccttcgaaactggatgtacga -cgatcgagacgaagaggtatatataacctaaatactaggtacgttgttagagagacgatg -aaaattaatcgtcaatacgctggcgaacactgagggggacccaatgctcttctcggtcta -aaaaggaatgtgtcagaaattggtcagttcaaaagtagaccggatctttgcggagaacaa -ttcacggaacgtagcgttgggaaatatcctttctaccacacatcggattttcgccctctc -ccattatttattgtgttctcacatagaattattgtttagacatccctcgttgtatggaga -gttgcccgagcgtaaaggcataatccatataccgccgggtgagtgacctgaaattgtttt -tagttgggatttcgctatggattagcttacacgaagagattctaatggtactataggata -attataatgctgcgtggcgcagtacaccgttacaaacgtcgttcgcatatgtggctaaca -cggtgaaaatacctacatcgtatttgcaatttcggtcgtttcatagagcgcattgaatta -ctcaaaaattatatatgttgattatttgattagactgcgtggaaagaaggggtactcaag -ccatttgtaaaagctgcatctcgcttaagtttgagagcttacattagtctatttcagtct -tctaggaaatgtctgtgtgagtggttgtcgtccataggtcactggcatatgcgattcatg -acatgctaaactaagaaagtagattactattaccggcatgcctaatgcgattgcactgct -atgaaggtgcggacgtcgcgcccatgtagccctgataataccaatacttacatttggtca -gcaattctgacattatacctagcacccataaatttactcagacttgaggacaggctcttg -gagtcgatcttctgtttgtatgcatgtgatcatatagatgaataagcgatgcgactagtt -agggcatagtatagatctgtgtatacagttcagctgaacgtccgcgagtggaagtacagc -tgagatctatcctaaaatgcaaccatatcgttcacacatgatatgaacccagggggaaac -attgagttcagttaaattggcagcgaatcccccaagaagaaggcggagtgacgttgaacg -ggcttatggtttttcagtacttcctccgtataagttgagcgaaatgtaaacagaataatc -gttgtgttaacaacattaaaatcgcggaatatgatgagaatacacagtgtgagcatttca -cttgtaaaatatctttggtagaacttactttgctttaaatatgttaaaccgatctaataa -tctacaaaacggtagattttgcctagcacattgcgtccttctctattcagatagaggcaa -tactcagaaggttttatccaaagcactgtgttgactaacctaagttttagtctaataatc -atgattgattataggtgccgtggactacatgactcgtccacaaataatacttagcagatc -agcaattggccaagcacccgacttttatttaatggttgtgcaatagtccagattcgtatt -cgggactctttcaaataatagtttcctggcatctaagtaagaaaagctcataaggaagcg -atattatgacacgctcttccgccgctgttttgaaacttgagtattgctcgtccgaaattg -agggtcacttcaaaatttactgagaagacgaagatcgactaaagttaaaatgctagtcca -cagttggtcaagttgaattcatccacgagttatatagctattttaatttatagtcgagtg -tacaaaaaacatccacaataagatttatcttagaataacaacccccgtatcatcgaaatc -ctccgttatggcctgactcctcgagcttatagcatttgtgctggcgctcttgccaggaac -ttgctcgcgaggtggtgacgagtgagatgatcagtttcattatgatgatacgattttatc -gcgactagttaatcatcatagcaagtaaaatttgaattatgtcattatcatgctccatta -acaggttatttaattgatactgacgaaattttttcacaatgggttttctagaatttaata -tcagtaattgaagccttcataggggtcctactagtatcctacacgacgcaggtccgcagt -atcctggagggacgtgttactgattaaaagggtcaaaggaatgaaggctcacaatgttac -ctgcttcaccatagtgagccgatgagttttacattagtactaaatcccaaatcatacttt -acgatgaggcttgctagcgctaaagagaatacatacaccaccacatagaattgttagcga -tgatatcaaatagactcctggaagtgtcagggggaaactgttcaatatttcgtccacagg -actgaccaggcatggaaaagactgacgttggaaactataccatctcacgcccgacgcttc -actaattgatgatccaaaaaatatagcccggattcctgattagcaaagggttcacagaga -aagatattatcgacgtatatcccaaaaaacagacgtaatgtgcatcttcgaatcgggatg -aatacttgtatcataaaaatgtgacctctagtatacaggttaatgttagtgatacacaat 
-actcgtgggccatgggttctcaaataaaatgtaatattgcgtcgatcactcacccacgta -tttggtctaattatgttttatttagtgacaatccaatagataaccggtcctattaagggc -tatatttttagcgaccacgcgtttaaacaaaggattgtatgtagatggtaccagtttaat -tgccagtgggcaatcctaagcaaaatgagattctatcctaaagtttgggcttgatataag -atttcggatgtatgggttttataatcgttggagagctcaatcatgagctaatacatggat -ttcgctacctcaccgagagaccttgcatgaagaattctaaccaaaagtttaataggccgg -attggattgagttaattaagaccttgttcagtcatagtaaaaacccttaaattttaccga -ttgacaaagtgagcagtcgcaataccctatgcgaaacgcctcgatagtgactaggtatac -aaggtttttgagttcctttgaaatagttaactaatttaaaattaattaacgacatggaaa -tcacagaacctaatgctttgtaggagttatttatgctgtttactgcctctacaaccctaa -taaagcagtcctaagaatgaaacgcatcttttagttcagaaagtggtatccagggtggtc -aatttaataaattcaacatcgggtctcaggatattcggtcatataatttattaagggctc -ttcgagtcttactctgagtgaaattggaaacagtcatccttttcgttgtgaggcatctta -caccgctatcgatatacaatgcattccaccgcggtgtcccgtacacaaggaaacttgtta -ccttggggatataagaaaactcacacgtctcattattaaactgagtacaatttttgcacg -agaaagtaatgcaatacaatatgatgaaagccagctaatgaaaagggatggaacgcacct -cggatctgttgcactggattaaaatccgattatttttaaaaatattcagtgctagagcat -atcaggtctacttttttatctggtatgtaaagcccacggagcgatagtgagatccttacg -actcaacgaaaagttataacataactcccgttagccaaagcccaatcccgattactgccc -taccctaacgtctgccatctaaatatcgaacttgttatgatcaatgtgactacctcccac -cctttccccttcatttgttccactggggataagctagcgttttcagaatcaatgcaataa -gaatagccaattgtctcacttcatcagagctcttggcaattccaggcgctacgtggttct -ggaatatattcatttttcaaatagtaatacgtttagtgttgctattgtctacacgtttgg -atattacgttatgtgagcggacatcaatagttgtctaactctttagtaagccagagatag -cactcttagcgaatggataccatcttccataagtttagttaatagtccgaaacaactgct -tcgagcatatttgaacctccttgtaggcaaatagcctcttcaaagcaatcttactaatag -atagagtttgttttaagggactactagaaatgggacaatcttaatagtatgacctaaact -gacatttaaagatatatccaggtggcaagcataaagatcattgcgccacctccaccgtgg -gattacttatcagtcgatatcctatatgctaagtttgcgacggcagaatacaaactaagc -tgagttgatgctaaccttacctatgataccccattggaccggttaacagccctacttatt -ccaaataaaagaacttttatgctgtagaagctattatagtgatgcctggtaacttcagta -tattaaaatgacacacatacgccatatagagctcctggaactttgaataatgagcgaact -tcgaagttgaagagcaagaaaccatatgtcacggttgcctaaagcccggtaaccagacat -gtgctatcattgatcattatcgaggttttcataaccttgacccattatcggctgtgcgcg -gacaagtacttaaatcactagtttcttcacctgcttatcggtaagaaataaggttggcaa -agaatcgcataagacggacgtagagccgcagcgttgtgcgagtccaggtgcatgcgcagc -aataggattttaaattttgttccatttttaatttagccgtaaggatgtccgtaaatgatt -gaaaattggattcaatctttgggcctatgctactggaacctgatcgacaaaatttcaaac -atacgttaactccgaaagaccgtatttttgcggctagaatagtcagtcgcttggagccat -ataccttaccacttaaacgacgtgctcctgtagttgaaatataaacagaacacaaagact -accgatcatatcaactgaagatctttgtaactttgaggcgaagcaccctcttcgagacaa -ctaagagtaaagtaccgggcgccgcaaggagtcgattgggaccctaaatcttgacgaatt -gctaagaggctcagagctaccactgtaatttctctagagcccataataaatgaacgatac -atccgtaggtagcacctaagggattataatggaagccaaatgcagttaataatattatat -actggcgtacacgattcgacggatctctcacatagtgattcacgacccccccctttgatt -gacacagcgtcagcattttgcaagaacgatcttctgcatagggtgcgccaccgtaaggat -gacgtcgaagctacaactgggtataatttaccatgcttccctgatgctgagtgcaataca -ctaagaatgagtttttaccccatatcaccagtatttgttctgttattgcgaagaaatggc -tatgctgagttggcgactaaagtcacccatcctttttattaggtaaccccctcccttaaa -ctaactgatttgctggagctgccctgcatacatatactttatcatttatggacgtccgtg -acgcttattatccaccatagtcgatatgctacacggattcattaatggatcgtaggagtt -taagttatatttactaagatcggtctcggctactatcccgccttacccggcgctatttac -ggccatttttaatatattgacggtaattattcctatggtttcgaccgcacgtccttggac -aagaaagaatggcaaaaaaaatgtaaaagaaaaaaaatattgagtccctaccatcatata -aaaaatatgtgatgagtaacttgacgaaatgttagtggttattaaagactatctattaca -ccttttgttttctgtcgtagtatattaaagtctagaagccttacaggaaaatcagggtta -tacagccgatactccgcagcatgaatcatcgaggaggtgtcctaccatcgcgccttgtaa 
-tcttgtctgtgtatactgtatttagaccttttatacaaagtaaatatctcggctttatgt -gattgggaggggcctactcaaacatgatgacttgacctaataatcactgtgcgggcgtct -tatgactagctattccttgaaatccaccaccaaatggttaatatgtaaaaactttgacga -tgaaacaaggtgaatgtgtagttactttgtgtaattagctgcgtcgagcattgcttgtaa -aaccgtcaatcgcacacgttacttccataaaatttctacgaatacacccttcttaaaaaa -aacgtaggaattcacgagtttaacaaacgataactgtataaagtggaagtccgaagaaag -cagatgcccgaactactcgaagatgtttcgttttcttaaccataggggcttcttaatggc -ccactacgcacattttgttcaagcccgagagggacatccccattacgggagtattactaa -aactgttccgtaatacgttcagcaagggatgaaaaaggccactgctcaagttattgacgt -gggagtattacatcggaagcctgaatcccacactatgatggtctgtacaggcctagggac -tgcgtctagacggtattaccggcttctaatcatacgatcgtgagtcttaacgggaagtaa -ggctcacacctaccccaaaccatttatctatgtaagtataaaattgtgcgtaagtgttca -aagtggacaataaagacgtggcaaaaacccccgcacataagccgctttagatttcacaaa -taccaatgcggttaaaaacatccttgagtcgtacatacaccatactcgcgttaaacggat -ataacagaagataataaatccggatgtggagtcggtgtaactatagaaagccaagtgaaa -taatgcttaccagtcatttagctatacggctttcatttcatgtcaagagggtggagtttg -acctgtacagttgatatatcaccgatacttagaactcacctaaagctaaaattgctcgca -gcgtgtaatccgcatattacaaacaatagatgggattcattatacataagacacgatgat -ctgctttttcaggttgcgagatgttgcctatcgtcaatcgagtcctgccttacaccactt -aaacaaaagtattgacagggaacctattttcgaggtattatatagtccagcttgaatatc -aatttgacagttaacctagtgaaaatcagtaagaggaaatacgccacattctccagtgaa -attctacgggttatcgtctagtccaactatcaattataactcacgagatataagtaaatt -ctcgtacttggcctgatttttattatactttggatccttagtaaacaggaagggagaaac -cttcaacgaaaaacactggattttgttttactctcaaagctcttatatgacggaaatacc -ctgtcaagtcttaactttattactagactaatgaaatgggcttggggtggccagaatcat -agtacaatttagcggatacactattcggactttcctatcggctgtctggttggataagta -tggggactaataggctagacatacctatacttaaactatacaggcgtcatctatctctgc -aactttggagttccctgatgttctcccgccctttgggttcacatcttctataccgacacc -cctaataacgattagtttgtgggttagagtaaattaatacggttaatattaatgtatcgt -tgaaaagctggtgtcgccaataaggtaaccggctaggcagagtatatgtcacgaagtata -actaccctaatgataagctgtaggaataaaattaatgctgtctctaagcgaagagatatt -tccgactctgttttaatgacgaatctcattacttctgacttgcaaatgttcaatatggca -cggtttcacggcacctttgtgacgcatataatgaacttagaagattataacgacggaact -ttatatgataatccgttacgattaaagaatctgttaaatatcataatggcattcagttct -agaccgtgcatcatggtaaacttactttctctgcatggcgacatacatttcgctattcaa -attcgcgtgtggttacacccactcgcacctttggaatattaagagaagatgatcagaaaa -tccattcgctcaatttttctgacgtacgtctaatttatcctaggagacaaatcgttttat -gtctctcacatttttgaagaaaggttcgagagacaatactcaggtcctgaactgctagaa -gatactcggtggagcgtggcaacaatgaaaaactcgtgacataaatgaatgatacttttc -caagttcagttaagtgaatatgtttaacatacccggcttttcgatcttaagctgacgctg -gacgtgcgagtaatgtcagtctcttacatacactagtgactccaagtttcgtcaaaaacg -ccccctcccttctcgagcccactcacgctatgtattgacgcgaacttgttcgggatcaga -cttttcaggagttcggtcgcgtgtccctatgtgctaatatataagttagatcgcattaga -tgctaatctgaatacttatagacgaccttcaacgagaacgggtaccaccttgaggctaga -gttaggtgtgaaacgacaggtagggacatataaaatttgagtgcggctttagttaagggt -ttaattacctactcaaacatcacgctcgcgcccttcgtacgtaatcgaccatctagaggc -taaggggactgtactaggtagtgattaatgatatcctagacgcacgtgccttagatcttc -agactctgatggtccgcgatcaccgtaattgtagtcctccaactcgatcactttgttggc -gtcaaagaaattacgatatctaaatacttataatacaataaccaaggatgagaatgactc -atcgcgttggagttatattgcttgaagttctatggaatgaaagcacgttatctgccgtcc -caatatctccagtgagctaattcattggacggtccactttgatcaatccccgaggagatg -ttcggacactttagtctgtaacacttagcgttgagaccacgaacaattgattactcagtc -ttgaaggtgttttccaaagttcattttaaataagactacgataggcctttcctattgata -taaactacccggctctgttgttcgtgtgagtcgtacttctctgtgtttttctgattatag -caagattcgattcttagtgtaaacagcgatttttatttgacccgtcaatgagaagcgcat -aggatctaagcaaaattatcaagttgtgccacaaggtaagatctttccagttattgcagg -taggatgtatcccacgttgatagtatgaggtctgacgtcaactgtctaggagagttgacc 
-gcgtgcgggtacaccggatttgcatcgatgttgagaacgcagaactcccactgtcgtggc -ggcgttcctgatatttagcaagaggcgttgataaagccctcatcatctagatctcgacct -catctgccctcttgctccatcattttctacacagactactttcctatctacgttagtata -attgctttctatcttagtatcatttagagcttctccgtcaacaggttcgtgctattaaag -ttagtacgaaagggacaacttgtagcaacgcatttaatcggttttcgactacttcgcaca -aaatcagataaagaagtttgtcattctattagacattgaattgcgcaattgacttgtacc -acttatgatcgaacactgaatcaagactgtgattaactaaaatagacaagccactatatc -aactaataaaaacgcccctggtggtcgaacatagttgactacaggataattaattggact -ggagccattacattctctacaatcgtatcacttcccaagtagacaactttgaccttgtag -tttcatgtacaaaaaaatgctttcgcaggagcacattggtagttcaatagtttcatggga -acctcttgagccgtcttctgtgggtgtgttcggatagtaggtactgataaagtcgtgtcg -ctttcgatgagagggaattcaccggaaaacaccttggttaacaggatagtctatgtaaac -ttcgagacatgtttaagagttaccagcttaatccacggtgctctactagtatcatcagct -gtcttgcctcgcctagaaatatgcattctatcgttatcctatcaacggttgccgtactga -gcagccttattgtggaagagtaatatataaatgtagtcttgtctttacgaagcagacgta -agtaataatgacttggaataccaaaactaaacatagtggattatcatactcaagaactct -ccagataaataacagtttttacgatacgtcaccaatgagcttaaagattaggatcctcaa -aactgatacaaacgctaattcatttgttattggatccagtatcagttaaactgaatggag -tgaagattgtagaatgttgttctggcctcgcatggggtctaggtgatatacaatttctca -tacttacacggtagtggaaatctgattctagcttcgtagctgactatactcaaggaacca -ctgctcaaggtaggagactagttccgaccctacagtcaaagtggccgaagcttaaactat -agactagttgttaaatgctgatttcaagatatcatctatatacagtttggacaattatgt -gtgcgaaactaaaattcatgctattcagatggatttcacttatgccttagaaacagatat -tgcccgagctcaatcaacagttttagccggaaacaatcgaagcatagggacaatgtatct -tttcctaaattgccatgtgcagatttctgagtgtcacgaagcgcataatagaatcttgtg -ttgcctcaactcgttgaaaagtttaaaacaatcgcagcagtctttttggggtctactgtg -tgtttgcaaaataactgaaagaaacgcttgaacaactctgaagtagctcgagtactcatt -aaagtgtaacacattagtgaatatcggccaatgaaccaaacgcttcccggtacgctatct -ctctcatcgggaggcgatgtgcaggttatctacgaaagcatccctttacgttgagagtgt -cgatgcatgaacctcattgtaacaatagcccagcaaattctcatacgtgcctcagggtcc -gggcgtactcctccatggaagggcgcgcatctagtgttataccaactcgctttttaacta -ctatgctgtagttctacaggcatagtggccagtattttctaacttctctggatagatgct -ctcactcctcatccatcacggcttcagtttacgtcttacttgcttgttcagcaacggatg -gaggcattaagtatcttcactgttccctaaaattgctgttcaatatcaaagtaaggacga -tacagggaaagctcaagcacactcattgaatactgccccagttgcaacctcacttaatct -gacaaaaataatgactactctaagtgttgcggaagcagtctcttccacgagcttgtctgt -atcacttcgtataggcatgtaactcgatagacacgaacaccgagtgagaaactatattct -tgcttccgtgtgtgtgacaccaggtaattgatgcggatataagctggagatcactcacgc -ccacacaaggcgctgctacctctttattccaatgtgtaagaatttgctaacttcatttct -agaccgcagctttgcggtcataatttcacggtacggacccttgggttagagacttgataa -cacacttcgcagtttccaccgcgcacatgttttagtggcttctaacatagaatttttgtt -gtgacataaagagtgcgtgggagacttgcccgaccgttaagccataatcaattgaaagcc -ccgtgagtcacatctaattggttgtactgcgcatttagctatcctttagctgactcgaag -agattcgattcctaatataggttaattagatggctgccgcgcgaagtaaaacgtgaaaaa -cgtagtgcgcagatctgcataactcgcgcttaattacttatgagtagttccaagttcgct -acgttatgagagagattggaattaagcaaatatgttttatggtgattttgggatgagaag -gactgctaagtacggctactaaacaaatttctaaaaccgccatctaccttatcttggaga -catttaagttgtatatgtcactagtctagcttttgtctgtgggacgcgttctcggaatga -gggaaatgcaagagccgattcatcaaatgcttatctaagaaagtagtggactattacacc -aagcacgaatgccagggaactgctttcttgctcaggacctcgcgacaaggtaccccgcat -aagtcctagaattacatttggtcagcaatgctgacatttgaccgtgaaaacataatttta -atcagaaggcagctcacccgcttgctctagatcttatctttgtatgaatgtcagaattta -ctgcaatatccgttccgaatagtgagggcttagtatagttctctgtatacaggtcacatc -aaactccccctgtcctagtacagctctgagctttaattaattgcatacatttccttcaat -catcagatgaaaacaccgcgaatcatgctcttctcgtatagggcaagagaagcaacaaac -aactagcccgactcacgttcatccgccgtatccttgttcagttcttactccgtattaggt -cagcgaaatctaatcagaataatcggtcgcgtatcaaaattaaaatcccgcttgaggttg 
-acaattaaaacgctgagcagttatcggctattagatagtggggtgaaagtaattggctgg -aattatgttaaaacgtgatattaagctaaaatacgctacttgttgccgacctaattcagt -cattcgatattcagttagagccaagaataacaagcttgtataaattgaacggggtgcact -aaacgatgtgttactctaatattcagcttggagtatacctgaaggcgaattcatgtatcg -gccaataataagacgttgaagatcacaatttggactagcaaaagaaggtgatttatgcgt -ggggattgagtccactgtacgagtacggtctctggaaaattataggttcagggaatataa -ggaagtaaagataattaccaagagatttttggtatcgctatgacccagaggtgttctaac -gtctgttttgatccgcagaatttctgcctcaatgcatatttgacggacttgaactagagc -ctctaaagttaaatggcgacgcaactgttcctaaacttcaattattactactcttttttt -cctagggtattgtagaggccagtggacaaaataaatcaaatttaagatgtttcggacatt -aacatcccccgtagcatagaaatcatcagttatccaatctctcatcgagcttttacaatt -tctgctggcgctatggacagcatatgccgcgagacctccgcaagactcacttgatcactg -taagtatcttcattagaggttagagcctatagttaagctgctgacctagtaaaattggta -ttttctaattttattgctcaagttaaaggttagtgaagggataatgacgttatttttgaa -caatgggttgtattcaattttatatcacgaatggaacccttcattcccggcataatacta -gacgacacgaacaagctccgatctatcagccaggcacgtgttaaggtttaattccggcaa -accaatgaagcatcaaaaggtgacctgatgcaacttagggtcacgatgagtttttcagga -ctacttattacctattaataagttaacatgagccttcataccccgtaagacaatacatac -tccaccaattagaattctgagccatcttatctttttgtatcatcgaagggtatggccgaa -taggttaattagttactcctaacgtctctacaggcatgcatttgacgcaccttcgaaaat -agtcaatctctcgccacacgcgtctagtatgcagcatcaaaaatatagtccacggtttcc -ggattaccaaacgcggcaaagagaaacattgtatcgacggagataacttaatacagaagg -aaggggcatcttcgaatacggatgaataattctatctgtttattctgacatcttgttttc -aggttaatcttacgcattcaaatgacgcctgccccatgcgtgcgcaattattttctaata -ttgacgagagcaatctcactccttttgggtctatttatgttttattgaggcacaagccta -tacagaacaggtactattaaggccgtgagtgtgagactcaaaccgtggaaacaaaggatg -ggttgttcttggtacaagttttagtgcatgtgggcaatccttaccaaaatcagatgctat -ccttaactttgggctgcatttaagatggcggttggaggcctgtgagaatcctgcgtgtca -tctttaatgaccgaattcatccatgtagattcagatcacacactcattccttgatgttgt -ctaaacaaaagttgttgtggacgcattggagggagttaagtaacaacttgggatcgcata -cttataaaaattatatgttaaactttcacaaacgctgaagtccaaagtaactagcccaaa -cgcctcgagagtcactaggtattaatggtgtttgagttcctgtgaaatagtgttcgaagg -taaaatttatgtaccaaatcgaaagaacacttaataaggcttgcttgcacggaggtatga -tgtttactgactctacaaccctaattttccagtacgtacattcattccaataggttagtt -ctcaaagtgctatacaggctcctcaattgatgatatgcttcagccgctctatggatatta -gctcattttatttaggaagcccgcttagaggcttactatgagggaaatgccaaaatgtca -tacttttcggtgtgtcccatatgacaccgctttacatagaatttgaattaaaacgcgctc -tcccgttcactaccatacttggtaccgtgcgcatattacatatagatataggatcatttt -ttaaagctgtactaggtttgatcgacaatcttatgctatactatatgatgtaaccctcat -aatcaataccgatcgtacgatcctagcataggtggcaagcgattttatgccgattattgt -gttaaatagtctgtgagtgtgattatcagggctacgttggtagaggggttgtatagacct -cgcacacattgtgacatacttaacaatatacgaaaactgatataataaatccccttaccc -aaacaccaatcccgttgaatcaactaccataacgtctcccatataaattgcctacttgtt -tgcataaatctgaatacataacaccattgcaccttcttgtgttccaatcccgttaagatt -gccttgtcagatgatatgcaagaacaatagcatttgctagcaattattaacagctcttcg -aattgcctccacataacgcgggagggtatattttaatttggcaaatactaagtactgttg -gcgtcatatgctattaacggttggatattaagttatgtcagccgtaagcaagagtgggcg -aaatattttgttacccagtgagagcactcttagagtttggatacaataggccatatgttg -acttaagaggacgtaactacgccgtacaccattgttcaaccgacttcttggcaaatagaa -tcgtattagcaatcttaagaatagagacacgttcgtgttagggtatactacaaatccgaa -aatcttaagaggatcacctaaactgaaatttatacatatttcaacgtggatagatttaac -ataattcagccacctccaacctgggagtaattttcagtagatttactagatgattagtgg -cccaacgcacttgactatataagatctggggatcctaacctgacctatgagacaaaattg -gaaacgttaacagcccttatgtgtacaaagaaaagtaagttgttgctgttcaacagatga -tagtcatgacgcgtaacttcactatagtaaattgaaacaaatacgcaatttagacagaat -ggtacggtcatgaatgacagtaattcgaagtgctagaccaacttaaaataggtaaacgtg -cccgaaaccccccttaacagaaagctgctatcatggtgcagtatcgacgtgttcagaaac 
-ttgtaacttttgagcaggtccgagcacatggaagtatatcacgtgtttctgaaccggctt -atccctaagatatatccgtcgcaaactttcgatttagtcccacgtagagcccaagcgttg -tgcgactccacgtgcatgcccagaaatacgagtttaaatttggttacatggttaattttg -accgaagcatcgcactttatgattgataattggattcaatatgtcgccctatgcgaatgc -aacatgatccacaatttggctataagacgtttaatccgtatcacactttgtttgcggcta -gtatagtaacgcccgtgcaccaagagtcagtaacaattataagtactccgcaggtacttc -aaatataaaaactaatcaaacacgacccatatgatcatctgaagatatttggaactttct -cgacaaccaccctcgtactcaatacttacactaatcgacaggcacacgcaacgtgtacag -tcgcaccatattgagtcaagatttgcttagtggcgatgagcgtacacgcttatttctcta -gtcacaattagttatctacgagacatcacgagggagcaaataagcgatgttatggctaca -cataggcacgtatgaatatgatataagccagttaaacagtcgaaccatcgagcaaattct -catgcaccaacccacacgttgaggcacaaagagtaagctgtttgaatgtaacttcttctg -ctgagcgggccccaacgtaaggatcaactagaagagaaaactcggtattagtttaaatgc -gtcacggagcatgagtgcatttcactaagaatgtctgtgtaaccaatataacatctattt -gttatctgattgcctacttatggctttgcggtcgtggcgactaatgtctccaatcctttt -gaggtcggtaccaactccctttaaattacgctgtgcaggctcatgcactgcatacatata -cggtagcaggtagggacctcacgcacccttattataatcaatagtagttatcagtcaacg -aggcaggaatgctgaggtcgaggtgttggtatattttctatgtgccgtctaggcgactat -cacgcattaccaggcgagatttaagccaattttgaatatagtcaacgtaatttttactat -gggttccaccgaaacgccttgcacaactaagaatcccataaaatatcgatatcaaataaa -agattgtgtcaataccttcatatatattttttcggttgactaacgtgaactaaggttagg -ggttttgtatgtctatataggaaacagtttcttttctgtcctactttagtaaagtcttca -agccttactccaaaatcacggtgattaagccgttactcagcagcatgattctgcctgctc -gggtcctaaaatccagccttgtaagagtcgctgtgtattagctagggagacctttgttaa -aaaggatatatcgcggcgggatgtgagtgcgtggcgcatactcaatcttcagctcgtgtc -attataatatctctcccccacgcttttcactagatatgccgtgtaagcaaacaccttatg -cttaatttcgaaaatattggtacttgaaaaaagctgtaggggtacttaatgtctggtagg -agatcaggagagaattgagtgtaaaaccgtaaagccctcacctgacttcatgtaaatggc -ttagaagactccatgatttaataaatactacgaaggaaagactggatctaaagataactc -tagtaaggccaactcccttcaatgctgttgccagttataatccaagagctgtccttttct -gaaccatagcggcttctgaagcgaactagaagcaaagttggttctagccagacagccaca -taccctgtacgggtgtattactaaaactggtccggtattagttcaccaagggaggaatta -ggcaaaggatctaggtatgcaagtcggagtattacatccctaccctgaatccatcaatag -gttcctctgtactggccttcgcaatgagtattcaaggttgtacagccgtataataataag -atagtgactatgaacgggaagtaacccgctcaccttccccaaaacattgttatatctaag -tattaaagtctgccgtagtgttaatactcgaaaataaacaactggcaaattacaccgcac -ttaagccgcttttgatttatatttttccaatgcgcttttaaaaataattcagtcctacat -actaattaagacccttaaacggagatatcacaagttaagttttaaccatctcgactaggt -ggaactatagatacccaactcaatttatcattacctgtaatgttcctagaaggattgcat -ttcatgtcaagacggtggagtttcacagcgaaacttcagtgtgaacagattctgagaaat -cacctaaacctattagtcagagcacccggttagaaccagttgtcaaaaaatagagcggtt -gcatgagacagaagtaacgatgagatccgttgtaacgttgagacatctggcctatcgtca -atacagtcctcccttaaaaatatttttaaatactaggcaaacccaacataggttagtcct -atgtgatacgccacatggtatatcattttgtaacgttacctagggataatcaggaagtgg -aattacgcaaaagtagacagtgaaatgcttagggttatagtctagtccaaagataaagga -taaagcacgtcagagaactatattagccgaatgggaatcattgttaggagactgtggatc -atgtctaaaaagcaacgcagaaacagtcatcgaaaaaatctcgtttttgtttgaatctaa -aagagctttgatgaccgatagtacctgtatactagttactgtattacgtgtctaatgatt -tcggattggggtccccagaatcagacgtcattgtagacgattcaagtttaccaatttaat -ttcccagctctccttggagaactatcgccaataattgcagtcactttccttttctgaaac -gataaagccgtcagagttctctgcaacgttggacttacctgaggttctaacccactttcg -gttctaatagtagttaacgacacaacgaataacctttactgtggggctttcacgatattt -tttcgcttattattaatggttacgtcataagctggtgtccaaattaaggttaccggcttc -gcagagtagttgtatccaagtataacttccctaatcataagatcgaggtagaaaattaat -gctgtctctaaccgaacagatatgtcccactatgtggtatggacgttgctaattacttct -gaagggaaattggtcattatggatacgtgtctaccatcaggtcggacgcagatatggttc -tgtcttcagttgatccaccgttctttataggataataactgacgattaaagattatggta 
-aatagattaagccaattctcttcttgtcagtgaagcatccttaactgacttgctctgcag -cccctcatacatttagctattcaaagtaccggctcgtttcaaactctcccacctttggaa -gaggttgtcaacttgataagtatatcatttacagcattttttcggacgtacctctaatgt -ttcattgcagaaaattagttttttctatcgcacattttgcaagtaacgttagagacacaa -ttatctgcgaatgaactgctagatctgacgaccgggagcctcgcaaatatcaaaaaagac -tgacatatatcaaggagtcgttgacaagtgctggtaagtcaattggtttatctgtcccgg -cgtttcgatcttaagctgaccatgcacggcagagtaatgtcactctcgttcttacaagtc -tgtctccaagggtcggcaaaaaagacccctccattctcgagcccactcacgatatgtagg -gacgacaacttgtgcggcttatgaattgtctggactgcgggcgagggtccatatctccga -agttagaagggacatacctttagatgataagatcaattcttattgacgaaattcatccac -aacggggaacaacttcaccctagacttacgtctgaaaagacacctagcgtcttataaaag -gtcagtgccccgtttcgtaaggctggaattacctacgcaaacttaaacctcgcgcccttc -cttacgtatcgacaagatagaggctatcgcgaatgtactacggaggcatgaatcatatac -tagaaccaagtgcctgtgatattaacaagatgatccgacgcgagcaccgtaattctaggc -ataaaactccagcaatttgggggccgaaaacaaatgacgttagctaattaattatatgac -atgatcaaaggaggtcaatcacgcatcgagttcgacgtatattcattgaacttcgtgcgt -ttgaaagaaacttttatgaaggcaaaattgatcctgtctcctatttcatgcgtacctcct -agttgataattccccgagcagtggttaggacacttttgtcggtatcaagttccggtctca -aaacgtaaaattctgtaatctgtatggatggtctgtgaattagttaatttttatgaagtc -gtcgagacgcagttcctattgatttattctaaacggagatgtgcttcgtgggactcggaa -gtagatctgtgtttatgattattgctactttagatgctgactgttaactccgtgttgttt -ttcaaccgtatatcacaaccgaattggatagaacctatagtttcaagttctgccacaagg -tatcatatttacagttagtgctggttgcttctttcaaacgtggtgagtttgtgctatcac -gtcaacggtagagctcagtggaccgagtgcgcgttcaaccctgttccagagagggtgtga -tagcacatataccacgctcgtcgaggcgttcatgatagtttgcaagagccggtgttaaac -acatattattattgttatccaactaatcggacctatgcataaagcattgtctaaacagaa -taattgcctatatacggtagttttagtgatttatatcttagtatcagttagagcttcgaa -ctcttcaggttcctcatatttaacgttcttcgaaagcgaaaacttctacaaacgaatgta -agcggttttccaagtagtacctataaatcacagaaagatctgtctcagtatagttgaaat -ggtattcagctagtgacgtgtaccaattatcatagttcactcaagcaagacgctcattaa -cgaatatagacaagacactatatcatataataaaaaagaacatggtgctcgaacatagtt -gaattcaccatattgaaggggaatgctgacatgtaattcgctactagacgatcaattccc -tacttgtcaaagttgaactggtacgttcttggaattaaatatgattgcgctggaccaaat -tgcgacttcttgagtttcagggcaaacgattgagccggaggatgtccgtctcttaccttt -cttgcttatgataaacgacggtccctgtacatcactgggaattctcagcaaaaataattg -ggtaaatcgagactcgatgtattcggccacaaaggtgttagacgttaaagattattcaac -ggggcgataataggatcataaccggtatgcaagcgcattgaaagagccatgagatcctta -tccgataaacgctgcacggtatgtgcagccttattgtcgatcacgaatttataaatgtag -tctgggctgtaagttgaagacctaagttataatgaagtgcaataccaaatcgattcatag -tggattatcagactcaagatatctcctgataaattacagttgttaagatacggataaaat -gagatttaagattagcagcctctaatctgtttcaatcccgttggaatgtggtatgcgatc -aaggttaagttaaaatcaagcctgtcttcagtcttgattcttgttctgccatcgcatgcg -gtctacgtgagttaatatgtagcttacgttctagcttgtgctaatctgagtatagattcg -tagaggaatattatcaagcttccacgcctcaacgtacgtgtattggtcacacaagacact -aaaagtggaagtagcgtaaactatagtctagttgttaaatgctcagttcttgttatattc -gatatactcttggctaatttatgtctgagtatataaaattaatgatattaacttgcattt -cacggatcccttagaaaaagattttgaccgagcgcattataaacggttacaccgaatcaa -tagaagcatacccaatagctttctttgaatttattgcctgcgcaacttggctgactctct -agatccgaataattctatatggtcgtgacgaaactagttcattactgtttaaaatgccaa -catgtcttttgggccgataatggctctttgcaaaattactcaatgatacgattgatcaaa -gcggtagttgctagtggtagcatgtaagtctatcaaatgtctgattatccgaaaatcttc -caaaagagtccacgtaccatatctatctcatagcgacgcgaggggaaccttatctaacta -tcattccatttaccgggtgactctcgatgcaggatccgattgggataaattgcccagaaa -tggctcattcctgactaagggtaaggccgttctcagcaagggaaccccgcgaatctaggc -ttataccatctagattgttaactacttgcctgtagttctacagccatactggacagttgt -ttctaaatgatcgggattcatgctagcactcctctgaatgcaccgcgtaagtttaactat -tacgtccgtgggcagataaggatggaggctgtatgtatcttaactgttacctaatatggc 
-tggtaattatcaaagtaaggaccttaatgccatagcgctagcaatcgctttgtatactga -ccatgtgccaacctctcttaatctgtaaaatataatgtcttagctaactgtggacgatca -tgtctctgcctagagcttcgctgtatcaattcctatagccagcgtactagtgacacaaca -acaccgtgtgagaaaagatattagtccttacgtctgtctctctacagcttattgatgagg -attgaacatggacatatagctccccctcaaaagcagatgctacctctttattccattctc -gaacatttgccgaacttaatttcgacaaacctgaggtcacgtcttaatttatcggtaacg -tcacgtccctttgagactggataaatatattaccaggggccaacgagcaattgttggagg -cgcttctataatacaaggtgtcttgtcaaagaaagacggcgtgcgtctcgtgcaactcac -ttaaccaatattaatgtgaaacccccctctctcacatcttatgcggtgtactgccctggt -acatttcctgtacaggactccaacagtgtagattcctaagatagctgttggagttgcctc -acgccagatcgaaaaactgaataaactagtgagctgagctgcagaaataccgcttaatta -cttatgactagttcaaagggacctacgtgatgtcagacattgcaaggaagaaattaggtt -tgtgcgtcattttggctggactagcactccttacttcccctactattcaaatgtcgtaaa -cagcatgagacaggatcgtgctgacatttaaggtctattgggaacgaggctacctttggt -cgcgcgctcgcgttctccgaatgaccgaaatgcatgagcacagtatgcaattgcttatag -atctaaggtctggtcgttgaaaccaagcacgtaggcctgggaaatcagttcttcctcagc -aactacacaaaagcgtccaagcattagtacttgtagtaaatgtccgaacctatgcgctca -tttgaaagtcaaaaaatatttttaagcagtaggcacctaacccgattcctctacttagta -gctttctttgattctcagaattgactgcaatatcactgcacaattctgtgccattactag -acttctctgtattaacgtctcatcttactaacactcgcctaggacacatctgagagtgaa -gtatttcaatacatttactgaaatcttcagttctaaaatccccgaataaggctcttatcg -gtttggccaacacaagaaaaaaacttcttgcaccactcaccttcatacgcaggagcctgg -ggaacttagtaataactatttcggcagacaaagcttataacaagttgccggcgcgtataa -tatttaaaagaccccttgagctgctcaattaaaacgctcacctggtataggctattagat -agtgccgtcttagtaaggggcgggaattatcggataaactgatattttgataaaataacc -gacttgttcacgacataagtcactaaggagattttatctttctccaaagtatatcttcct -tggataatttcaaagcgctgcaatttaagttctgttactagtttatgctgctgggaggtg -accggaaggcgtagtaatctagaggcaaattataagaagttcatcatatcattttcgact -acaaaaacaaggtgttgtatgccggcgcattgtgtaaactggacgagtaccctagatgga -aaattatacgttaagccaagatttcgatgtaatgataattacctacacatttttgctatc -cataggaacaagagctgttctataggctcgtggcatacgaacatttgctgccgctatgaa -tattggaagctcttcaactacagactctattcttaattgccgtcgaaaatgggccgaatc -ggctattattaatactcggtttttccgaggggattgttgtcgacagtcgtaattattatt -aatattgatgttggtgaggtcatttaaatacaaccttgcagacaatgaataagggatcca -atctctcatactccttttacaattgctcatgcccctatgcaaaccttatgccgccacacc -tccgcaactctctcttctgaactgtaagtagcttcattactggtttgagactatactgaa -gctgatgacattctaaaatggctattttcgaatgtgattcataatgtttatcgtttggga -tggcagaatcacgttatttttgatatagcccgggtattctattgtatagaacgtatgcta -caagtcattccccgaagaagactagaagtaaacaacatgcgaccatcgttaagccacgca -aggctgtagctttatttcccgataacctatcttccataaatagcggacagcaggatactg -acgctcaacatcagtggttatggtctaatttttaacttttaataaggtaacttcagcagg -catacacagtaactctttaatttataatcaaattagaagtctgacacttcttatattttt -ctatcatccaacgcgatcgcccattagcttattgtgttactaataacgtatctaaaccaa -tccttttcaagctactgcctatattgtcaatatatacaaacaacaggatagtaggctgct -taaaaaatattgtcaaccgtgtacgctttacaatacccggaaatcacaaactttgtagac -aacgagtgaaatttatacactacgaagggccagcgtacaagacccatgaattaggcgata -tgtttattctgacatattggtttatccttaatctgtcgctgtaaaatgaagccgccccca -tccctgcgaattttttttcgaagattcacgactgaaatataaatacgtttggctatattt -atgttggagggaggcaatagcctttactgttaaccgaagatttagccagtgagtgtgaca -ctaaaacactggaataaatgcaggcgttcttctgggtaaaaggtttagtcaatctcgcct -ataagttcatatagctctggatataattatctggcccatgcatttatcatggcgcttggt -gccctgtgtgaagccggcctctcatattgaaggtccgaagtattccatgtacattaagat -cactctctcattcatgcatcttggcttaacaaatctggttgtccaagctttccaggcacg -tatggtacaaattcggatcgaatacttataaaaatgatatgttaaactgtctaaaacgct -catctacaaagtaaagtgcactaaccaatagagtctcaagaccgtgtaatgctggtgcac -tgaatgtgtaatacggttagaagggattagttatgttacaaatccattgaaaacttaaga -agcattgcgtgctcggagggtgcatcttttatcaagagactaacattattttcaacgacg 
-tacatgctttacaatagggtacttatcaaacgccgagaaacgcgcctatagtgatgttat -gattatgacccgatatccattggaccgaattttatgtaggttcccagcgtactcgcgtaa -tatctcggtattgccataatgtaatacttgtcggtctctcccagatgaaaaagcgttaca -gagtatttcaatgaaaaacagcgcgcaacgtcaatacctttaggggtaacggccgctgat -ttcatatagatatacgataagttggtatagctctactaggtggcatccacaatcgttgca -tttactatagctggttacaatcataatctataccgttccttacatactaccatagcggga -tagcgtttttttgccgttgattgggtttaagaggatgtcagtctcattatatccgattcg -gtgggagagccgttgttttcaaatcgcacactttgtgacataatgtacaagataacaaaa -ctgatataagatataaactgtcaatatcaccttgacacttgaatcaaagtaaattaactc -gcaaatataatttgactaattgggtgcagatttctcaattaataaaaaaatggcaccgga -tgggcttacaagccccttatcattcacttgtatcatgatttccaagaacaatagaatttg -ctagcaagtatgaacagagattcgaattgcatccacagtacgccggagcgtttattttaa -tgtggatatgacgatgtactgttggcggcatttgctagtaaccggtccttatttacgtag -cgcacacgtaagcatgtctgggagaaatatggtggtacaatctcagagaaagattacagt -ttggtttaaataggacttatcgggtcggaagtggaacttaataagcagtacacaattggg -caacagacgtcttgcctattacaataggattacaatgcgttagatttcagacacgttcgt -gtttggctattcgtcaattccctaaatagttagacgatcaactattatcaaagtgattct -ttgttcatcctccattcatgtaacagatggcacactacgcataacgccgaggaattttaa -cgagatttaagagagcagttcgggcacaacccacttgactttataacagctcggcagcat -aaacggtaatatgtgacaaatttccaaacgttataagaacgtatgtgtacttagaaaact -aagtggttcatgttcaacagatgtgacgcagcaagcctaacttatctattggttttgcta -taaaagaacaaagttacacagaatcctaagggcttgtttcacacttatgcctagtgcttc -accatcttaaaatagcgaaaccggcacgaatcaaaccttaaaacaatgcgcagatattgg -tgatggtgactccgggtatgataatggtaactgttgaccagcgcccacctcatcgaagta -tagaaagtggttaggataaggatgagaccgaacttatttccggccataactttagatttt -ctacctagtacacaacatcagggcggacacgaaaccgccatcacatcatataccaggttt -aatttgcttaatgggggaagtgtcaacgaaccttcgaactttagcaggcatatggccatt -atatatggccccagagcagaatgctacagcagacaaaatttggatttatgtagtttaata -cctatcaaacttggtgtgaccatacttgtctaacgacagtgcacaaagtgtaagttacaa -ttattactactcagcagcttctgcaatgataaaatcttatcatacacgtcacatatgata -atatctacttagggggaacgggctccacaacctacatagtactcaatacttacactattc -gacaggcacaccaaacctgtacagtcccaaaagattgagtcaactttgcagtactgcaga -tcacagtaatagcttagttagcgagtcaaaattagttttctacgagactgcacgaccgtg -caaatttccgatgtgttggctacaaatagcaacgtatgaatttgtttgaagccacgtaaa -ctgtacaaccttagagataagtctcaggctactaaaaacacgttgtggcactaacaggat -catggttgattcttacttattcggctgaccggcccaataagtaaccttcaactagaacag -aataatcgggagtagtttaattcagtcaaggtgcaggtctcattgtaactaacaagctct -gtgtaaccaagttaaaatcgttttcttagcggattccctacttatggatttgagctcgtc -cacaatattcgatacaagaagtttgtggtccgtaacaacgaaattttaattacgctgtgc -agcctcatccaaggaattaatagaaggttgatggtaggctccgaacgctccatgattata -atcaagtggactgtgcagtaaacgaggaaggtatcctgacgtcgtggtgttcgtttttgt -tatttgtgccctatacgagtagataaaccatgaacagcacagtgtgaacccatggttgat -tttaggctaccttatttttaatttccgttacacagaaacgaattccacaactaacatgcc -attaatttttcgatatcttataaaagatggtcgaaattcattcatttattttttttcggt -tctcgaaagtcaactaagctgtcgcgttttgtttctctttagaggtaaaagtggctttga -tctcctacgtttggatactagtcaaccattactccatttgatccgtgagtatcacctgtc -taacatccagcattatgactcctcggcgaagaaaagacacacttcttagagtcgatgtgt -attagctagggacacagttgtttaatacgatagtgagcccagggagggcagtgcgtcccc -cagtagatttattcagctagtgtaagtataagatatctcacccacgaggttcaagtgata -tgcagtcttagaataatacttatcctgaatttcgatattatgggtacttcaataatccgc -tagcgctactttatgtctcgttggacagcaggacacatggcagtcttaaacactaaagac -atcacctgaatgaatgtaatgggattacaagaatcaatgaggtattatatacgacgtagg -aaactctggatatatacagtaatctagttacgccatcgcacttcattcctctggaaactt -agaagacatcagctgtacgtggaggaaccagacccccgtatgtagccaaatagaaccaaa -gttgcttatacaaacacacccaatgacaatggaccgctggagttcgtaaactcggaacgt -agtactgcacaaacccagcatttagcaataggagctacgtatgcaactcccacgtggtaa -taccttcaagctatcaatatataggtgcctagctaatcgcattcgcaagcagtattcaag 
-cttgtaaaccagtataataattacagaggctctatgaaacccaactttccagctaaaagt
-cccaattaaatggttatttc
diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/knucleotide.cc b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/knucleotide.cc
deleted file mode 100644
index 48f5c79..0000000
--- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/knucleotide.cc
+++ /dev/null
@@ -1,236 +0,0 @@
-// ------------------------------------------------------------------
-// run with: knucleotide 0 < ../examples/knucleotide-input.txt
-// ------------------------------------------------------------------
-//
-// output should be:
-//
-// T 31.520
-// A 29.600
-// C 19.480
-// G 19.400
-//
-// AT 9.922
-// TT 9.602
-// TA 9.402
-// AA 8.402
-// GA 6.321
-// TC 6.301
-// TG 6.201
-// GT 6.041
-// CT 5.961
-// AG 5.841
-// CA 5.461
-// AC 5.441
-// CC 4.041
-// CG 4.021
-// GC 3.701
-// GG 3.341
-//
-// 54 GGT
-// 24 GGTA
-// 4 GGTATT
-// 0 GGTATTTTAATT
-// 0 GGTATTTTAATTTATAGT
-// ------------------------------------------------------------------
-#include <algorithm>
-#include <array>
-#include <cstdint>
-#include <cstdio>
-#include <cstring>
-#include <iomanip>
-#include <iostream>
-#include <map>
-#include <string>
-#include <thread>
-#include <type_traits>
-#include <vector>
-#include "parallel_hashmap/phmap.h"
-
-// ------------------------------------------------------------------
-constexpr size_t thread_count = 4;
-
-struct Cfg {
-    unsigned char *to_char;
-    unsigned char to_num[128];
-    using Data = std::vector<unsigned char>;
-
-    Cfg() {
-        static unsigned char __tochar[] = {'A', 'C', 'T', 'G'};
-        to_char = __tochar;
-        to_num[static_cast<unsigned>('A')] = to_num[static_cast<unsigned>('a')] = 0;
-        to_num[static_cast<unsigned>('C')] = to_num[static_cast<unsigned>('c')] = 1;
-        to_num[static_cast<unsigned>('T')] = to_num[static_cast<unsigned>('t')] = 2;
-        to_num[static_cast<unsigned>('G')] = to_num[static_cast<unsigned>('g')] = 3;
-    }
-} const cfg;
-
-// ------------------------------------------------------------------
-template <size_t size>
-struct Key
-{
-    // select type to use for 'data', if hash key can fit on 32-bit integer
-    // then use uint32_t else use uint64_t.
-    using Data = typename std::conditional<size <= 16, uint32_t, uint64_t>::type;
-
-    struct Hash {
-        Data operator()(const Key& t)const{ return t._data; }
-    };
-
-    Key() : _data(0) {
-    }
-
-    Key(const char *str) {
-        _data = 0;
-        for(unsigned i = 0; i < size; ++i){
-            _data <<= 2;
-            _data |= cfg.to_num[unsigned(str[i])];
-        }
-    }
-
-    // initialize hash from input data
-    void InitKey(const unsigned char *data) {
-        for(unsigned i = 0; i < size; ++i){
-            _data <<= 2;
-            _data |= data[i];
-        }
-    }
-
-    // updates the key with 1 byte
-    void UpdateKey(const unsigned char data) {
-        _data <<= 2;
-        _data |= data;
-    }
-
-    // masks out excess information
-    void MaskKey() {
-        _data &= _mask;
-    }
-
-    // implicit casting operator to string
-    operator std::string() const {
-        std::string tmp;
-        Data data = _data;
-        for(size_t i = 0; i != size; ++i, data >>= 2)
-            tmp += cfg.to_char[data & 3ull];
-        std::reverse(tmp.begin(), tmp.end());
-        return tmp;
-    }
-
-    bool operator== (const Key& in) const {
-        return _data == in._data;
-    }
-private:
-    static constexpr Data _mask = ~(Data(-1) << (2 * size));
-    Data _data;
-};
-
-// ------------------------------------------------------------------
-template <size_t size, class K = Key<size>>
-using HashTable = phmap::flat_hash_map<K, unsigned, typename K::Hash>;
-
-// ------------------------------------------------------------------
-template <size_t size>
-void Calculate(const Cfg::Data& input, size_t begin, HashTable<size>& table)
-{
-    // original implementation fully recomputes the hash key for each
-    // insert to the hash table. This implementation only partially
-    // updates the hash, this is the same as in C GCC, Rust #6 and Rust #4
-    Key<size> key;
-    // initialize key
-    key.InitKey(input.data() + begin);
-    // use key to increment value
-    ++table[key];
-
-    auto itr_begin = input.data() + begin + thread_count;
-    auto itr_end = (input.data() + input.size() + 1) - size;
-    size_t nsize = std::min(size, thread_count);
-    for(;itr_begin < itr_end; itr_begin += thread_count) {
-        // update the key 1 byte at a time
-        for(unsigned i = 0; i < nsize; ++i)
-            key.UpdateKey( itr_begin[i] );
-
-        // then finally mask out excess information
-        key.MaskKey();
-
-        // then use key to increment value
-        ++table[key];
-    }
-}
-
-// ------------------------------------------------------------------
-template <size_t size>
-HashTable<size> CalculateInThreads(const Cfg::Data& input)
-{
-    HashTable<size> hash_tables[thread_count];
-    std::thread threads[thread_count];
-
-    auto invoke = [&](unsigned begin) {
-        Calculate<size>(input, begin, hash_tables[begin]);
-    };
-
-    for(unsigned i = 0; i < thread_count; ++i)
-        threads[i] = std::thread(invoke, i);
-
-    for(auto& i : threads)
-        i.join();
-
-    auto& frequencies = hash_tables[0];
-    for(unsigned i = 1 ; i < thread_count; ++i)
-        for(auto& j : hash_tables[i])
-            frequencies[j.first] += j.second;
-
-    // return the 'frequency' by move instead of copy.
-    return std::move(frequencies);
-}
-
-// ------------------------------------------------------------------
-template <size_t size>
-void WriteFrequencies(const Cfg::Data& input)
-{
-    // we "receive" the returned object by move instead of copy.
-    auto&& frequencies = CalculateInThreads<size>(input);
-    std::map<unsigned, std::string, std::greater<unsigned>> freq;
-    for(const auto& i: frequencies)
-        freq.insert({i.second, i.first});
-
-    const unsigned sum = static_cast<unsigned>(input.size()) + 1 - size;
-    for(const auto& i : freq)
-        std::cout << i.second << ' ' << (sum ? double(100 * i.first) / sum : 0.0) << '\n';
-    std::cout << '\n';
-}
-
-// ------------------------------------------------------------------
-template <size_t size>
-void WriteCount( const Cfg::Data& input, const char *text ) {
-    // we "receive" the returned object by move instead of copy.
-    auto&& frequencies = CalculateInThreads<size>(input);
-    std::cout << frequencies[Key<size>(text)] << '\t' << text << '\n';
-}
-
-// ------------------------------------------------------------------
-int main()
-{
-    Cfg::Data data;
-    std::array<char, 256> buf;
-
-    while(fgets(buf.data(), static_cast<int>(buf.size()), stdin) && memcmp(">THREE", buf.data(), 6));
-    while(fgets(buf.data(), static_cast<int>(buf.size()), stdin) && buf.front() != '>') {
-        if(buf.front() != ';'){
-            auto i = std::find(buf.begin(), buf.end(), '\n');
-            data.insert(data.end(), buf.begin(), i);
-        }
-    }
-    std::transform(data.begin(), data.end(), data.begin(), [](unsigned char c){
-        return cfg.to_num[c];
-    });
-    std::cout << std::setprecision(3) << std::setiosflags(std::ios::fixed);
-
-    WriteFrequencies<1>(data);
-    WriteFrequencies<2>(data);
-    // value at left is the length of the passed string.
-    WriteCount<3>(data, "GGT");
-    WriteCount<4>(data, "GGTA");
-    WriteCount<6>(data, "GGTATT");
-    WriteCount<12>(data, "GGTATTTTAATT");
-    WriteCount<18>(data, "GGTATTTTAATTTATAGT");
-}
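The deleted knucleotide.cc above counts k-mers by packing each base into 2 bits and, as the comment in Calculate() notes, extends the key incrementally: shift in the new base, then mask off the base that fell out of the window, instead of re-encoding the whole k-mer each step. A minimal self-contained sketch of that rolling 2-bit update (illustrative only; the names and the inline base encoding are not from the file):

```
#include <cstdint>
#include <iostream>
#include <string>

// Rolling 2-bit k-mer key, as in knucleotide.cc above: appending a base
// is a shift + OR, and a mask drops the base that left the window, so
// the key is never recomputed from scratch.
int main() {
    const std::string seq = "GGTATTTTAATT";
    const unsigned k = 3;                                // k-mer length
    const uint64_t mask = ~(~uint64_t(0) << (2 * k));    // low 2*k bits

    auto code = [](char c) -> uint64_t {                 // A,C,T,G -> 0..3
        switch (c) { case 'A': return 0; case 'C': return 1;
                     case 'T': return 2; default:  return 3; }
    };

    uint64_t key = 0;
    for (size_t i = 0; i < seq.size(); ++i) {
        key = ((key << 2) | code(seq[i])) & mask;        // rolling update
        if (i + 1 >= k)
            std::cout << "k-mer ending at " << i << ": key=" << key << '\n';
    }
}
```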
diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/lazy_emplace_l.cc b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/lazy_emplace_l.cc
deleted file mode 100644
index beb5f2f..0000000
--- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/lazy_emplace_l.cc
+++ /dev/null
@@ -1,54 +0,0 @@
-// ------------------------
-// Windows specific example
-// courtesy of @kanonka
-// ------------------------
-#include <windows.h>
-#include "parallel_hashmap/phmap.h"
-#include <ppl.h>
-#include <string>
-#include <vector>
-
-class srwlock {
-    SRWLOCK _lock;
-
-public:
-    srwlock()     { InitializeSRWLock(&_lock); }
-    void lock()   { AcquireSRWLockExclusive(&_lock); }
-    void unlock() { ReleaseSRWLockExclusive(&_lock); }
-};
-
-using Map = phmap::parallel_flat_hash_map<std::string, int,
-    phmap::priv::hash_default_hash<std::string>,
-    phmap::priv::hash_default_eq<std::string>,
-    std::allocator<std::pair<const std::string, int>>, 8, srwlock>;
-
-class Dict
-{
-    Map m_stringsMap;
-
-public:
-    int addParallel(std::string&& str, volatile long* curIdx)
-    {
-        int newIndex = -1;
-        m_stringsMap.lazy_emplace_l(std::move(str),
-            [&](Map::value_type& p) { newIndex = p.second; }, // called only when key was already present
-            [&](const Map::constructor& ctor)                 // construct value_type in place when key not present
-            { newIndex = InterlockedIncrement(curIdx); ctor(std::move(str), newIndex); });
-
-        return newIndex;
-    }
-};
-
-int main()
-{
-    size_t totalSize = 6000000;
-    std::vector<int> values(totalSize);
-    Dict dict;
-    volatile long index = 0;
-    concurrency::parallel_for(size_t(0), size_t(totalSize),
-        [&](size_t i) {
-            std::string s = "ab_uu_" + std::to_string(i % 1000000);
-            values[i] = dict.addParallel(std::move(s), &index);
-        });
-
-    return 0;
-}
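lazy_emplace_l, exercised above, takes the key plus two callbacks: the first runs under the submap lock when the key is already present, the second receives a constructor with which to build the value_type in place when it is not. A portable sketch of the same call, substituting std::mutex for the Windows SRW lock and std::atomic for InterlockedIncrement (the string-interning framing here is illustrative, not from the deleted file):

```
#include <atomic>
#include <iostream>
#include <memory>
#include <mutex>
#include <string>
#include "parallel_hashmap/phmap.h"

// Same lazy_emplace_l pattern as the deleted example, but buildable
// outside Windows: std::mutex as the per-submap lock.
using Map = phmap::parallel_flat_hash_map<std::string, int,
    phmap::priv::hash_default_hash<std::string>,
    phmap::priv::hash_default_eq<std::string>,
    std::allocator<std::pair<const std::string, int>>, 8, std::mutex>;

int main() {
    Map ids;
    std::atomic<int> next{0};

    auto intern = [&](const std::string& s) {
        int idx = -1;
        ids.lazy_emplace_l(s,
            [&](Map::value_type& p) { idx = p.second; },  // key already present
            [&](const Map::constructor& ctor) {           // key absent: build in place
                idx = next.fetch_add(1);
                ctor(s, idx);
            });
        return idx;
    };

    std::cout << intern("alpha") << ' ' << intern("beta") << ' '
              << intern("alpha") << '\n';   // prints: 0 1 0
}
```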
diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/matt.cc b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/matt.cc
deleted file mode 100644
index 51afcce..0000000
--- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/matt.cc
+++ /dev/null
@@ -1,139 +0,0 @@
-#include <algorithm>
-#include <chrono>
-#include <cstdint>
-#include <cstdio>
-#include <functional>
-#include <random>
-#include <string>
-#include <vector>
-#include "parallel_hashmap/phmap.h"
-#include "parallel_hashmap/btree.h"
-
-// -------------------------------------------------------------------
-// -------------------------------------------------------------------
-class Timer
-{
-public:
-    Timer(std::string name) : _name(name), _start(std::chrono::high_resolution_clock::now()) {}
-
-    ~Timer()
-    {
-        std::chrono::duration<double> elapsed_seconds = std::chrono::high_resolution_clock::now() - _start;
-        printf("%s: %.3fs\n", _name.c_str(), elapsed_seconds.count());
-    }
-
-private:
-    std::string _name;
-    std::chrono::high_resolution_clock::time_point _start;
-};
-
-// --------------------------------------------------------------------------
-// from: https://github.com/preshing/RandomSequence
-// --------------------------------------------------------------------------
-class RSU
-{
-private:
-    uint32_t m_index;
-    uint32_t m_intermediateOffset;
-
-    static uint32_t permuteQPR(uint32_t x)
-    {
-        static const uint32_t prime = 4294967291u;
-        if (x >= prime)
-            return x; // The 5 integers out of range are mapped to themselves.
-        uint32_t residue = ((unsigned long long) x * x) % prime;
-        return (x <= prime / 2) ? residue : prime - residue;
-    }
-
-public:
-    RSU(uint32_t seedBase, uint32_t seedOffset)
-    {
-        m_index = permuteQPR(permuteQPR(seedBase) + 0x682f0161);
-        m_intermediateOffset = permuteQPR(permuteQPR(seedOffset) + 0x46790905);
-    }
-
-    uint32_t next()
-    {
-        return permuteQPR((permuteQPR(m_index++) + m_intermediateOffset) ^ 0x5bf03635);
-    }
-};
-
-using Perturb = std::function<void (std::vector<uint64_t> &)>;
-
-// --------------------------------------------------------------------------
-// --------------------------------------------------------------------------
-template <class Set, size_t N>
-void test(const char *name, Perturb perturb1, Perturb /* perturb2 */)
-{
-    //phmap::btree_set<uint64_t> s;
-    Set s;
-
-    unsigned int seed = 76687;
-    RSU rsu(seed, seed + 1);
-
-    for (uint32_t i=0; i<N; ++i)
-        s.insert(rsu.next());
-
-    std::vector<uint64_t> order(s.begin(), s.end()); // contains sorted, randomly generated keys (when using phmap::btree_set)
-                                                     // or keys in the final order of a Set (when using Set).
-
-    perturb1(order); // either keep them in same order, or shuffle them
-
-#if 0
-    order.resize(N/4);
-    perturb2(order);
-#endif
-
-    Timer t(name); // start timer
-    Set c;
-    //c.reserve(order.size()); // whether this "reserve()" is present or not makes a huge difference
-    c.insert(order.begin(), order.end()); // time for inserting the same keys into the set
-                                          // should not depend on them being sorted or not.
-}
-
-// --------------------------------------------------------------------------
-// --------------------------------------------------------------------------
-template <class T, size_t N>
-using pset = phmap::parallel_flat_hash_set<T,
-    phmap::priv::hash_default_hash<T>,
-    phmap::priv::hash_default_eq<T>,
-    phmap::priv::Allocator<T>, // alias for std::allocator
-    N>;
-
-// --------------------------------------------------------------------------
-// --------------------------------------------------------------------------
-int main()
-{
-    auto shuffle = [](std::vector<uint64_t> &order) {
-        std::random_device rd;
-        std::mt19937 g(rd());
-        std::shuffle(order.begin(), order.end(), g);
-    };
-
-    auto noop = [](std::vector<uint64_t> &) {};
-
-    auto perturb2 = noop;
-
-    constexpr uint32_t num_keys = 10000000;
-    using T = uint64_t;
-
-    test<phmap::flat_hash_set<T>, num_keys>("flat_hash_set ordered ", noop, perturb2);
-
-    test<phmap::flat_hash_set<T>, num_keys>("flat_hash_set shuffled", shuffle, perturb2);
-
-    test<pset<T, 4>, num_keys>("parallel (16) ordered ", noop, perturb2);
-
-    test<pset<T, 4>, num_keys>("parallel (16) shuffled", shuffle, perturb2);
-
-    test<pset<T, 6>, num_keys>("parallel (64) ordered ", noop, perturb2);
-
-    test<pset<T, 6>, num_keys>("parallel (64) shuffled", shuffle, perturb2);
-
-    test<pset<T, 8>, num_keys>("parallel (256) ordered ", noop, perturb2);
-
-    test<pset<T, 8>, num_keys>("parallel (256) shuffled", shuffle, perturb2);
-}
-
-
-
-
diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/pmr.cc b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/pmr.cc
deleted file mode 100644
index 5c13524..0000000
--- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/pmr.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-#if __has_include(<experimental/memory_resource>)
-#include <experimental/memory_resource>
-    namespace std
-    {
-        namespace pmr = experimental::pmr;
-    }
-#elif __has_include(<memory_resource>)
-    #include <memory_resource>
-#elif
-    #error <memory_resource> is missing
-#endif
-
-#include "parallel_hashmap/phmap.h"
-
-struct MyStruct
-{
-    template <typename K, typename V>
-    using ParallelFlatHashMap = phmap::parallel_flat_hash_map<K, V,
-        phmap::Hash<K>, std::equal_to<K>,
-        std::pmr::polymorphic_allocator<std::pair<const K, V>>>;
-
-    ParallelFlatHashMap<uint64_t, uint64_t> hashMap;
-
-    // No compile errors
-    MyStruct()
-    {
-    }
-
-    // Compile errors
-    MyStruct(std::pmr::memory_resource* memoryResource = std::pmr::get_default_resource())
-        : hashMap(memoryResource)
-    {
-    }
-};
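pmr.cc above plugs std::pmr::polymorphic_allocator into a phmap map (and records that the resource-taking constructor did not compile at the time). Independently of phmap, this is what a polymorphic allocator buys: the container's type stays fixed while its memory resource is chosen at construction. A standard-library-only sketch (nothing here comes from the deleted file):

```
#include <iostream>
#include <memory_resource>
#include <string>
#include <unordered_map>

int main() {
    // A fixed buffer backing a monotonic resource: allocations are
    // bump-pointer fast and all released at once when the resource dies.
    char buffer[64 * 1024];
    std::pmr::monotonic_buffer_resource pool(buffer, sizeof(buffer));

    // Same container type as with the default resource; only the memory
    // source changes -- that is the point of polymorphic_allocator.
    std::pmr::unordered_map<int, std::pmr::string> m(&pool);
    m.emplace(1, "one");
    m.emplace(2, "two");

    for (const auto& [k, v] : m)
        std::cout << k << " -> " << v << '\n';
}
```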
diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/serialize.cc b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/serialize.cc
deleted file mode 100644
index 0d44109..0000000
--- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/examples/serialize.cc
+++ /dev/null
@@ -1,204 +0,0 @@
-#include <chrono>
-#include <cstdio>
-#include <functional>
-
-#define USE_CEREAL 0
-
-#if USE_CEREAL
-    #include "cereal/types/unordered_map.hpp"
-    #include "cereal/types/memory.hpp"
-    #include "cereal/types/bitset.hpp"
-    #include "cereal/archives/binary.hpp"
-    #include <bitset>
-#endif
-
-#include "parallel_hashmap/phmap_dump.h"
-
-#include <fstream>
-#include <iostream>
-#include <string>
-
-using phmap::flat_hash_map;
-using namespace std;
-template <class T> using milliseconds = std::chrono::duration<T, std::milli>;
-
-// --------------------------------------------------------------------------
-// from: https://github.com/preshing/RandomSequence
-// --------------------------------------------------------------------------
-class RSU
-{
-private:
-    unsigned int m_index;
-    unsigned int m_intermediateOffset;
-
-    static unsigned int permuteQPR(unsigned int x)
-    {
-        static const unsigned int prime = 4294967291u;
-        if (x >= prime)
-            return x; // The 5 integers out of range are mapped to themselves.
-        unsigned int residue = ((unsigned long long) x * x) % prime;
-        return (x <= prime / 2) ? residue : prime - residue;
-    }
-
-public:
-    RSU(unsigned int seedBase, unsigned int seedOffset)
-    {
-        m_index = permuteQPR(permuteQPR(seedBase) + 0x682f0161);
-        m_intermediateOffset = permuteQPR(permuteQPR(seedOffset) + 0x46790905);
-    }
-
-    unsigned int next()
-    {
-        return permuteQPR((permuteQPR(m_index++) + m_intermediateOffset) ^ 0x5bf03635);
-    }
-};
-
-// --------------------------------------------------------------------------
-// --------------------------------------------------------------------------
-void showtime(const char *name, std::function<void ()> doit)
-{
-    auto t1 = std::chrono::high_resolution_clock::now();
-    doit();
-    auto t2 = std::chrono::high_resolution_clock::now();
-    auto elapsed = milliseconds<double>(t2 - t1).count();
-    printf("%s: %.3fs\n", name, (int)elapsed / 1000.0f);
-}
-
-// --------------------------------------------------------------------------
-// --------------------------------------------------------------------------
-template <class MapType>
-void testMapSerialization(const char *maptype, const char *fname)
-{
-    MapType table;
-    const int num_items = 100000000;
-
-    printf("Building test %s\n", maptype);
-
-    // Iterate and add keys and values
-    // -------------------------------
-    showtime("build time", [&table, num_items]() {
-        unsigned int seed = 76687;
-        RSU rsu(seed, seed + 1);
-
-        table.reserve(num_items);
-        for (int i=0; i < num_items; ++i)
-            table.insert(typename MapType::value_type(rsu.next(), i));
-    });
-
-    // cerealize and save data
-    // -----------------------
-    showtime("serialize", [&]() {
-#if !USE_CEREAL
-        phmap::BinaryOutputArchive ar_out(fname);
-        table.phmap_dump(ar_out);
-#else
-        ofstream os(fname, std::ofstream::out | std::ofstream::trunc | std::ofstream::binary);
-        cereal::BinaryOutputArchive archive(os);
-        archive(table.size());
-        archive(table);
-#endif
-    });
-
-    MapType table_in;
-
-    // deserialize
-    // -----------
-    showtime("deserialize", [&]() {
-#if !USE_CEREAL
-        phmap::BinaryInputArchive ar_in(fname);
-        table_in.phmap_load(ar_in);
-#else
-        ifstream is(fname, std::ofstream::in | std::ofstream::binary);
-        cereal::BinaryInputArchive archive_in(is);
-        size_t table_size;
-
-        archive_in(table_size);
-        table_in.reserve(table_size);
-        archive_in(table_in); // deserialize from file out.cereal into table_in
-#endif
-    });
-
-
-    if (table == table_in)
-        printf("All checks out, table size: %zu\n\n", table_in.size());
-    else
-        printf("FAILURE\n");
-}
-
-// --------------------------------------------------------------------------
-// --------------------------------------------------------------------------
-template <class SetType>
-void testSetSerialization(const char *settype, const char *fname)
-{
-    SetType table;
-    const int num_items = 100000000;
-
-    printf("Building test %s\n", settype);
-
-    // Iterate and add keys and values
-    // -------------------------------
-    showtime("build time", [&]() {
-        unsigned int seed = 76687;
-        RSU rsu(seed, seed + 1);
-
-        table.reserve(num_items);
-        for (int i=0; i < num_items; ++i)
-            table.insert(typename SetType::value_type(rsu.next()));
-    });
-
-    // cerealize and save data
-    // -----------------------
-    showtime("serialize", [&]() {
-#if !USE_CEREAL
-        phmap::BinaryOutputArchive ar_out(fname);
-        table.phmap_dump(ar_out);
-#else
-        ofstream os(fname, std::ofstream::out | std::ofstream::trunc | std::ofstream::binary);
-        cereal::BinaryOutputArchive archive(os);
-        archive(table.size());
-        archive(table);
-#endif
-    });
-
-    SetType table_in;
-
-    // deserialize
-    // -----------
-    showtime("deserialize", [&]() {
-#if !USE_CEREAL
-        phmap::BinaryInputArchive ar_in(fname);
-        table_in.phmap_load(ar_in);
-#else
-        ifstream is(fname, std::ofstream::in | std::ofstream::binary);
-        cereal::BinaryInputArchive archive_in(is);
-        size_t table_size;
-
-        archive_in(table_size);
-        table_in.reserve(table_size);
-        archive_in(table_in); // deserialize from file out.cereal into table_in
-#endif
-    });
-
-
-    if (table == table_in)
-        printf("All checks out, table size: %zu\n\n", table_in.size());
-    else
-        printf("FAILURE\n");
-}
-
-
-
-// --------------------------------------------------------------------------
-// --------------------------------------------------------------------------
-int main()
-{
-    testSetSerialization<phmap::flat_hash_set<unsigned int>>("flat_hash_set", "dump1.bin");
-#if 0
-    testSetSerialization<phmap::parallel_flat_hash_set<unsigned int>>("parallel_flat_hash_set", "dump1.bin");
-
-    testMapSerialization<phmap::flat_hash_map<unsigned int, int>>("flat_hash_map", "dump1.bin");
-    testMapSerialization<phmap::parallel_flat_hash_map<unsigned int, int>>("parallel_flat_hash_map", "dump1.bin");
-#endif
-
-    return 0;
-}
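serialize.cc above benchmarks building a 100M-entry table and round-tripping it through phmap's dump/load archives. With the timing scaffolding removed, the round trip reduces to the four calls below (same phmap_dump API as in the file; the test data, size, and file name are illustrative):

```
#include <cstdio>
#include "parallel_hashmap/phmap.h"
#include "parallel_hashmap/phmap_dump.h"

int main() {
    phmap::flat_hash_map<unsigned int, int> table;
    for (unsigned int i = 0; i < 1000; ++i)
        table[i * 7919u] = (int)i;                 // arbitrary test data

    {   // dump the table to disk (trivially-copyable keys/values)
        phmap::BinaryOutputArchive ar_out("table.bin");
        table.phmap_dump(ar_out);
    }

    phmap::flat_hash_map<unsigned int, int> table_in;
    {   // load it back into an empty map
        phmap::BinaryInputArchive ar_in("table.bin");
        table_in.phmap_load(ar_in);
    }

    if (table == table_in)
        std::printf("round-trip OK, %zu entries\n", table_in.size());
    else
        std::printf("FAILURE\n");
}
```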
diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/Makefile b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/Makefile
deleted file mode 100644
index 6f1e09c..0000000
--- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/Makefile
+++ /dev/null
@@ -1,48 +0,0 @@
-PANDOC = stack exec pandoc --
-MATHJAX = "http://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML"
-FLAGS = --standalone --toc --toc-depth=2 --mathjax=$(MATHJAX) --highlight-style pygments
-PNG_IMAGES = $(patsubst %.pdf,%.png,$(wildcard img/*.pdf))
-IFORMAT = -f gfm
-FILTER = includes.exe
-FILTER_OPT = --filter=${FILTER} # includes.hs
-
-############################### html
-STYLE = css/style.css
-TEMPLATE_HTML = template.html
-HTML_OPT = -c ${STYLE} --template ${TEMPLATE_HTML} -t html
-PGTITLE = --metadata pagetitle="The Parallel Hashmap"
-
-############################### pdf
-TEMPLATE_TEX = template.latex
-TEX_OPT = --template $(TEMPLATE_TEX) --pdf-engine=xelatex
-
-############################### epub
-EPUB_COVER = --epub-cover-image=img/cover-kindle.jpg
-
-
-SRC = parallel_hashmap.md
-
-OBJ = $(SRC:.md=.html)
-
-all: html
-
-includes.exe: includes.hs
-	stack exec ghc -- -o $@ -no-keep-hi-files -no-keep-o-files includes.hs
-
-html: parallel_hashmap.md $(FILTER) ${TEMPLATE_HTML} ${STYLE}
-	$(PANDOC) ${FILTER_OPT} ${IFORMAT} ${HTML_OPT} $(FLAGS) ${PGTITLE} -o ../index.html parallel_hashmap.md
-
-%.pdf: %.md $(FILTER) ${TEMPLATE_TEX}
-	$(PANDOC) ${FILTER_OPT} ${IFORMAT} ${TEX_OPT} $(FLAGS) -o $@ $<
-
-pdf: $(FILTER) ${TEMPLATE_TEX}
-	rm -f parallel_hashmap.pdf; $(PANDOC) ${FILTER_OPT} ${IFORMAT} ${TEX_OPT} $(FLAGS) -o parallel_hashmap.pdf title.md $(SRC)
-
-native:
-	$(PANDOC) -s -t native $(SRC)
-
-native_filt: $(FILTER)
-	$(PANDOC) ${FILTER_OPT} -s -t native $(SRC)
-
-clean:
-	-rm -f *.html *.pdf cppi.epub
diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/bench_results/martinus_mod/InsertManyInt.html b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/bench_results/martinus_mod/InsertManyInt.html
deleted file mode 100644
index 39059cb..0000000
--- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/bench_results/martinus_mod/InsertManyInt.html
+++ /dev/null
@@ -1,69 +0,0 @@
-[69 lines of benchmark-chart HTML; markup lost in extraction]
diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/bench_results/martinus_mod/Lookup.html b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/bench_results/martinus_mod/Lookup.html
deleted file mode 100644
index 21ea085..0000000
--- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/bench_results/martinus_mod/Lookup.html
+++ /dev/null
@@ -1,84 +0,0 @@
-[84 lines of benchmark-chart HTML; markup lost in extraction]
diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/bench_results/martinus_mod/index2.html b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/bench_results/martinus_mod/index2.html
deleted file mode 100644
index d679632..0000000
--- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/bench_results/martinus_mod/index2.html
+++ /dev/null
@@ -1,12 +0,0 @@
-[12-line index page; markup lost in extraction. Recoverable text:
- "Benchmark Results"
- "insert 100m values in map"
- "Lookup 100m ints, all present | Lookup 100m ints, few present"]
- - - diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/css/bootstrap-responsive.min.css b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/css/bootstrap-responsive.min.css deleted file mode 100644 index ab59da3..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/css/bootstrap-responsive.min.css +++ /dev/null @@ -1,9 +0,0 @@ -/*! - * Bootstrap Responsive v2.1.0 - * - * Copyright 2012 Twitter, Inc - * Licensed under the Apache License v2.0 - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Designed and built with all the love in the world @twitter by @mdo and @fat. - */.clearfix{*zoom:1}.clearfix:before,.clearfix:after{display:table;line-height:0;content:""}.clearfix:after{clear:both}.hide-text{font:0/0 a;color:transparent;text-shadow:none;background-color:transparent;border:0}.input-block-level{display:block;width:100%;min-height:30px;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.hidden{display:none;visibility:hidden}.visible-phone{display:none!important}.visible-tablet{display:none!important}.hidden-desktop{display:none!important}.visible-desktop{display:inherit!important}@media(min-width:768px) and (max-width:979px){.hidden-desktop{display:inherit!important}.visible-desktop{display:none!important}.visible-tablet{display:inherit!important}.hidden-tablet{display:none!important}}@media(max-width:767px){.hidden-desktop{display:inherit!important}.visible-desktop{display:none!important}.visible-phone{display:inherit!important}.hidden-phone{display:none!important}}@media(min-width:1200px){.row{margin-left:-30px;*zoom:1}.row:before,.row:after{display:table;line-height:0;content:""}.row:after{clear:both}[class*="span"]{float:left;margin-left:30px}.container,.navbar-static-top .container,.navbar-fixed-top .container,.navbar-fixed-bottom .container{width:1170px}.span12{width:1170px}.span11{width:1070px}.span10{width:970px}.span9{width:870px}.span8{width:770px}.span7{width:670px}.span6{width:570px}.span5{width:470px}.span4{width:370px}.span3{width:270px}.span2{width:170px}.span1{width:70px}.offset12{margin-left:1230px}.offset11{margin-left:1130px}.offset10{margin-left:1030px}.offset9{margin-left:930px}.offset8{margin-left:830px}.offset7{margin-left:730px}.offset6{margin-left:630px}.offset5{margin-left:530px}.offset4{margin-left:430px}.offset3{margin-left:330px}.offset2{margin-left:230px}.offset1{margin-left:130px}.row-fluid{width:100%;*zoom:1}.row-fluid:before,.row-fluid:after{display:table;line-height:0;content:""}.row-fluid:after{clear:both}.row-fluid [class*="span"]{display:block;float:left;width:100%;min-height:30px;margin-left:2.564102564102564%;*margin-left:2.5109110747408616%;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.row-fluid [class*="span"]:first-child{margin-left:0}.row-fluid .span12{width:100%;*width:99.94680851063829%}.row-fluid .span11{width:91.45299145299145%;*width:91.39979996362975%}.row-fluid .span10{width:82.90598290598291%;*width:82.8527914166212%}.row-fluid .span9{width:74.35897435897436%;*width:74.30578286961266%}.row-fluid .span8{width:65.81196581196582%;*width:65.75877432260411%}.row-fluid .span7{width:57.26495726495726%;*width:57.21176577559556%}.row-fluid .span6{width:48.717948717948715%;*width:48.664757228587014%}.row-fluid .span5{width:40.17094017094017%;*width:40.11774868157847%}.row-fluid .span4{width:31.623931623931625%;*width:31.570740134569924%}.row-fluid .span3{width:23.076923076923077%;*width:23.023731587561375%}.row-fluid 
.span2{width:14.52991452991453%;*width:14.476723040552828%}.row-fluid .span1{width:5.982905982905983%;*width:5.929714493544281%}.row-fluid .offset12{margin-left:105.12820512820512%;*margin-left:105.02182214948171%}.row-fluid .offset12:first-child{margin-left:102.56410256410257%;*margin-left:102.45771958537915%}.row-fluid .offset11{margin-left:96.58119658119658%;*margin-left:96.47481360247316%}.row-fluid .offset11:first-child{margin-left:94.01709401709402%;*margin-left:93.91071103837061%}.row-fluid .offset10{margin-left:88.03418803418803%;*margin-left:87.92780505546462%}.row-fluid .offset10:first-child{margin-left:85.47008547008548%;*margin-left:85.36370249136206%}.row-fluid .offset9{margin-left:79.48717948717949%;*margin-left:79.38079650845607%}.row-fluid .offset9:first-child{margin-left:76.92307692307693%;*margin-left:76.81669394435352%}.row-fluid .offset8{margin-left:70.94017094017094%;*margin-left:70.83378796144753%}.row-fluid .offset8:first-child{margin-left:68.37606837606839%;*margin-left:68.26968539734497%}.row-fluid .offset7{margin-left:62.393162393162385%;*margin-left:62.28677941443899%}.row-fluid .offset7:first-child{margin-left:59.82905982905982%;*margin-left:59.72267685033642%}.row-fluid .offset6{margin-left:53.84615384615384%;*margin-left:53.739770867430444%}.row-fluid .offset6:first-child{margin-left:51.28205128205128%;*margin-left:51.175668303327875%}.row-fluid .offset5{margin-left:45.299145299145295%;*margin-left:45.1927623204219%}.row-fluid .offset5:first-child{margin-left:42.73504273504273%;*margin-left:42.62865975631933%}.row-fluid .offset4{margin-left:36.75213675213675%;*margin-left:36.645753773413354%}.row-fluid .offset4:first-child{margin-left:34.18803418803419%;*margin-left:34.081651209310785%}.row-fluid .offset3{margin-left:28.205128205128204%;*margin-left:28.0987452264048%}.row-fluid .offset3:first-child{margin-left:25.641025641025642%;*margin-left:25.53464266230224%}.row-fluid .offset2{margin-left:19.65811965811966%;*margin-left:19.551736679396257%}.row-fluid .offset2:first-child{margin-left:17.094017094017094%;*margin-left:16.98763411529369%}.row-fluid .offset1{margin-left:11.11111111111111%;*margin-left:11.004728132387708%}.row-fluid .offset1:first-child{margin-left:8.547008547008547%;*margin-left:8.440625568285142%}input,textarea,.uneditable-input{margin-left:0}.controls-row [class*="span"]+[class*="span"]{margin-left:30px}input.span12,textarea.span12,.uneditable-input.span12{width:1156px}input.span11,textarea.span11,.uneditable-input.span11{width:1056px}input.span10,textarea.span10,.uneditable-input.span10{width:956px}input.span9,textarea.span9,.uneditable-input.span9{width:856px}input.span8,textarea.span8,.uneditable-input.span8{width:756px}input.span7,textarea.span7,.uneditable-input.span7{width:656px}input.span6,textarea.span6,.uneditable-input.span6{width:556px}input.span5,textarea.span5,.uneditable-input.span5{width:456px}input.span4,textarea.span4,.uneditable-input.span4{width:356px}input.span3,textarea.span3,.uneditable-input.span3{width:256px}input.span2,textarea.span2,.uneditable-input.span2{width:156px}input.span1,textarea.span1,.uneditable-input.span1{width:56px}.thumbnails{margin-left:-30px}.thumbnails>li{margin-left:30px}.row-fluid .thumbnails{margin-left:0}}@media(min-width:768px) and (max-width:979px){.row{margin-left:-20px;*zoom:1}.row:before,.row:after{display:table;line-height:0;content:""}.row:after{clear:both}[class*="span"]{float:left;margin-left:20px}.container,.navbar-static-top .container,.navbar-fixed-top .container,.navbar-fixed-bottom 
.container{width:724px}.span12{width:724px}.span11{width:662px}.span10{width:600px}.span9{width:538px}.span8{width:476px}.span7{width:414px}.span6{width:352px}.span5{width:290px}.span4{width:228px}.span3{width:166px}.span2{width:104px}.span1{width:42px}.offset12{margin-left:764px}.offset11{margin-left:702px}.offset10{margin-left:640px}.offset9{margin-left:578px}.offset8{margin-left:516px}.offset7{margin-left:454px}.offset6{margin-left:392px}.offset5{margin-left:330px}.offset4{margin-left:268px}.offset3{margin-left:206px}.offset2{margin-left:144px}.offset1{margin-left:82px}.row-fluid{width:100%;*zoom:1}.row-fluid:before,.row-fluid:after{display:table;line-height:0;content:""}.row-fluid:after{clear:both}.row-fluid [class*="span"]{display:block;float:left;width:100%;min-height:30px;margin-left:2.7624309392265194%;*margin-left:2.709239449864817%;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.row-fluid [class*="span"]:first-child{margin-left:0}.row-fluid .span12{width:100%;*width:99.94680851063829%}.row-fluid .span11{width:91.43646408839778%;*width:91.38327259903608%}.row-fluid .span10{width:82.87292817679558%;*width:82.81973668743387%}.row-fluid .span9{width:74.30939226519337%;*width:74.25620077583166%}.row-fluid .span8{width:65.74585635359117%;*width:65.69266486422946%}.row-fluid .span7{width:57.18232044198895%;*width:57.12912895262725%}.row-fluid .span6{width:48.61878453038674%;*width:48.56559304102504%}.row-fluid .span5{width:40.05524861878453%;*width:40.00205712942283%}.row-fluid .span4{width:31.491712707182323%;*width:31.43852121782062%}.row-fluid .span3{width:22.92817679558011%;*width:22.87498530621841%}.row-fluid .span2{width:14.3646408839779%;*width:14.311449394616199%}.row-fluid .span1{width:5.801104972375691%;*width:5.747913483013988%}.row-fluid .offset12{margin-left:105.52486187845304%;*margin-left:105.41847889972962%}.row-fluid .offset12:first-child{margin-left:102.76243093922652%;*margin-left:102.6560479605031%}.row-fluid .offset11{margin-left:96.96132596685082%;*margin-left:96.8549429881274%}.row-fluid .offset11:first-child{margin-left:94.1988950276243%;*margin-left:94.09251204890089%}.row-fluid .offset10{margin-left:88.39779005524862%;*margin-left:88.2914070765252%}.row-fluid .offset10:first-child{margin-left:85.6353591160221%;*margin-left:85.52897613729868%}.row-fluid .offset9{margin-left:79.8342541436464%;*margin-left:79.72787116492299%}.row-fluid .offset9:first-child{margin-left:77.07182320441989%;*margin-left:76.96544022569647%}.row-fluid .offset8{margin-left:71.2707182320442%;*margin-left:71.16433525332079%}.row-fluid .offset8:first-child{margin-left:68.50828729281768%;*margin-left:68.40190431409427%}.row-fluid .offset7{margin-left:62.70718232044199%;*margin-left:62.600799341718584%}.row-fluid .offset7:first-child{margin-left:59.94475138121547%;*margin-left:59.838368402492065%}.row-fluid .offset6{margin-left:54.14364640883978%;*margin-left:54.037263430116376%}.row-fluid .offset6:first-child{margin-left:51.38121546961326%;*margin-left:51.27483249088986%}.row-fluid .offset5{margin-left:45.58011049723757%;*margin-left:45.47372751851417%}.row-fluid .offset5:first-child{margin-left:42.81767955801105%;*margin-left:42.71129657928765%}.row-fluid .offset4{margin-left:37.01657458563536%;*margin-left:36.91019160691196%}.row-fluid .offset4:first-child{margin-left:34.25414364640884%;*margin-left:34.14776066768544%}.row-fluid .offset3{margin-left:28.45303867403315%;*margin-left:28.346655695309746%}.row-fluid 
.offset3:first-child{margin-left:25.69060773480663%;*margin-left:25.584224756083227%}.row-fluid .offset2{margin-left:19.88950276243094%;*margin-left:19.783119783707537%}.row-fluid .offset2:first-child{margin-left:17.12707182320442%;*margin-left:17.02068884448102%}.row-fluid .offset1{margin-left:11.32596685082873%;*margin-left:11.219583872105325%}.row-fluid .offset1:first-child{margin-left:8.56353591160221%;*margin-left:8.457152932878806%}input,textarea,.uneditable-input{margin-left:0}.controls-row [class*="span"]+[class*="span"]{margin-left:20px}input.span12,textarea.span12,.uneditable-input.span12{width:710px}input.span11,textarea.span11,.uneditable-input.span11{width:648px}input.span10,textarea.span10,.uneditable-input.span10{width:586px}input.span9,textarea.span9,.uneditable-input.span9{width:524px}input.span8,textarea.span8,.uneditable-input.span8{width:462px}input.span7,textarea.span7,.uneditable-input.span7{width:400px}input.span6,textarea.span6,.uneditable-input.span6{width:338px}input.span5,textarea.span5,.uneditable-input.span5{width:276px}input.span4,textarea.span4,.uneditable-input.span4{width:214px}input.span3,textarea.span3,.uneditable-input.span3{width:152px}input.span2,textarea.span2,.uneditable-input.span2{width:90px}input.span1,textarea.span1,.uneditable-input.span1{width:28px}}@media(max-width:767px){body{padding-right:20px;padding-left:20px}.navbar-fixed-top,.navbar-fixed-bottom{margin-right:-20px;margin-left:-20px}.container-fluid{padding:0}.dl-horizontal dt{float:none;width:auto;clear:none;text-align:left}.dl-horizontal dd{margin-left:0}.container{width:auto}.row-fluid{width:100%}.row,.thumbnails{margin-left:0}.thumbnails>li{float:none;margin-left:0}[class*="span"],.row-fluid [class*="span"]{display:block;float:none;width:auto;margin-left:0}.span12,.row-fluid .span12{width:100%;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.input-large,.input-xlarge,.input-xxlarge,input[class*="span"],select[class*="span"],textarea[class*="span"],.uneditable-input{display:block;width:100%;min-height:30px;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.input-prepend input,.input-append input,.input-prepend input[class*="span"],.input-append input[class*="span"]{display:inline-block;width:auto}.modal{position:fixed;top:20px;right:20px;left:20px;width:auto;margin:0}.modal.fade.in{top:auto}}@media(max-width:480px){.nav-collapse{-webkit-transform:translate3d(0,0,0)}.page-header h1 small{display:block;line-height:20px}input[type="checkbox"],input[type="radio"]{border:1px solid #ccc}.form-horizontal .control-group>label{float:none;width:auto;padding-top:0;text-align:left}.form-horizontal .controls{margin-left:0}.form-horizontal .control-list{padding-top:0}.form-horizontal .form-actions{padding-right:10px;padding-left:10px}.modal{top:10px;right:10px;left:10px}.modal-header .close{padding:10px;margin:-10px}.carousel-caption{position:static}}@media(max-width:979px){body{padding-top:0}.navbar-fixed-top,.navbar-fixed-bottom{position:static}.navbar-fixed-top{margin-bottom:20px}.navbar-fixed-bottom{margin-top:20px}.navbar-fixed-top .navbar-inner,.navbar-fixed-bottom .navbar-inner{padding:5px}.navbar .container{width:auto;padding:0}.navbar .brand{padding-right:10px;padding-left:10px;margin:0 0 0 -5px}.nav-collapse{clear:both}.nav-collapse .nav{float:none;margin:0 0 10px}.nav-collapse .nav>li{float:none}.nav-collapse .nav>li>a{margin-bottom:2px}.nav-collapse .nav>.divider-vertical{display:none}.nav-collapse .nav 
.nav-header{color:#555;text-shadow:none}.nav-collapse .nav>li>a,.nav-collapse .dropdown-menu a{padding:9px 15px;font-weight:bold;color:#555;-webkit-border-radius:3px;-moz-border-radius:3px;border-radius:3px}.nav-collapse .btn{padding:4px 10px 4px;font-weight:normal;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}.nav-collapse .dropdown-menu li+li a{margin-bottom:2px}.nav-collapse .nav>li>a:hover,.nav-collapse .dropdown-menu a:hover{background-color:#f2f2f2}.navbar-inverse .nav-collapse .nav>li>a:hover,.navbar-inverse .nav-collapse .dropdown-menu a:hover{background-color:#111}.nav-collapse.in .btn-group{padding:0;margin-top:5px}.nav-collapse .dropdown-menu{position:static;top:auto;left:auto;display:block;float:none;max-width:none;padding:0;margin:0 15px;background-color:transparent;border:0;-webkit-border-radius:0;-moz-border-radius:0;border-radius:0;-webkit-box-shadow:none;-moz-box-shadow:none;box-shadow:none}.nav-collapse .dropdown-menu:before,.nav-collapse .dropdown-menu:after{display:none}.nav-collapse .dropdown-menu .divider{display:none}.nav-collapse .navbar-form,.nav-collapse .navbar-search{float:none;padding:10px 15px;margin:10px 0;border-top:1px solid #f2f2f2;border-bottom:1px solid #f2f2f2;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,0.1),0 1px 0 rgba(255,255,255,0.1);-moz-box-shadow:inset 0 1px 0 rgba(255,255,255,0.1),0 1px 0 rgba(255,255,255,0.1);box-shadow:inset 0 1px 0 rgba(255,255,255,0.1),0 1px 0 rgba(255,255,255,0.1)}.navbar .nav-collapse .nav.pull-right{float:none;margin-left:0}.nav-collapse,.nav-collapse.collapse{height:0;overflow:hidden}.navbar .btn-navbar{display:block}.navbar-static .navbar-inner{padding-right:10px;padding-left:10px}}@media(min-width:980px){.nav-collapse.collapse{height:auto!important;overflow:visible!important}} diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/css/bootstrap.min.css b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/css/bootstrap.min.css deleted file mode 100644 index 4a4440c..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/css/bootstrap.min.css +++ /dev/null @@ -1,10 +0,0 @@ -/*! - * Bootstrap v2.1.0 - * - * Copyright 2012 Twitter, Inc - * Licensed under the Apache License v2.0 - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Designed and built with all the love in the world @twitter by @mdo and @fat. 
- * Augmented by Eric Kryski @ekryski - */article,aside,details,figcaption,figure,footer,header,hgroup,nav,section{display:block}audio,canvas,video{display:inline-block;*display:inline;*zoom:1}audio:not([controls]){display:none}html{font-size:100%;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}a:focus{outline:thin dotted #333;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}a:hover,a:active{outline:0}sub,sup{position:relative;font-size:75%;line-height:0;vertical-align:baseline}sup{top:-0.5em}sub{bottom:-0.25em}img{height:auto;max-width:100%;vertical-align:middle;border:0;-ms-interpolation-mode:bicubic}#map-canvas img{max-width:none}button,input,select,textarea{margin:0;font-size:100%;vertical-align:middle}button,input{*overflow:visible;line-height:normal}button::-moz-focus-inner,input::-moz-focus-inner{padding:0;border:0}button,input[type="button"],input[type="reset"],input[type="submit"]{cursor:pointer;-webkit-appearance:button}input[type="search"]{-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;-webkit-appearance:textfield}input[type="search"]::-webkit-search-decoration,input[type="search"]::-webkit-search-cancel-button{-webkit-appearance:none}textarea{overflow:auto;vertical-align:top}.clearfix{*zoom:1}.clearfix:before,.clearfix:after{display:table;line-height:0;content:""}.clearfix:after{clear:both}.hide-text{font:0/0 a;color:transparent;text-shadow:none;background-color:transparent;border:0}.input-block-level{display:block;width:100%;min-height:30px;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}body{margin:0;font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:14px;line-height:20px;color:#333;background-color:#fff}a{color:#08c;text-decoration:none}a:hover{color:#005580;text-decoration:underline}.img-rounded{-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px}.img-polaroid{padding:4px;background-color:#fff;border:1px solid #ccc;border:1px solid rgba(0,0,0,0.2);-webkit-box-shadow:0 1px 3px rgba(0,0,0,0.1);-moz-box-shadow:0 1px 3px rgba(0,0,0,0.1);box-shadow:0 1px 3px rgba(0,0,0,0.1)}.img-circle{-webkit-border-radius:500px;-moz-border-radius:500px;border-radius:500px}.row{margin-left:-20px;*zoom:1}.row:before,.row:after{display:table;line-height:0;content:""}.row:after{clear:both}[class*="span"]{float:left;margin-left:20px}.container,.navbar-static-top .container,.navbar-fixed-top .container,.navbar-fixed-bottom .container{width:940px}.span12{width:940px}.span11{width:860px}.span10{width:780px}.span9{width:700px}.span8{width:620px}.span7{width:540px}.span6{width:460px}.span5{width:380px}.span4{width:300px}.span3{width:220px}.span2{width:140px}.span1{width:60px}.offset12{margin-left:980px}.offset11{margin-left:900px}.offset10{margin-left:820px}.offset9{margin-left:740px}.offset8{margin-left:660px}.offset7{margin-left:580px}.offset6{margin-left:500px}.offset5{margin-left:420px}.offset4{margin-left:340px}.offset3{margin-left:260px}.offset2{margin-left:180px}.offset1{margin-left:100px}.row-fluid{width:100%;*zoom:1}.row-fluid:before,.row-fluid:after{display:table;line-height:0;content:""}.row-fluid:after{clear:both}.row-fluid [class*="span"]{display:block;float:left;width:100%;min-height:30px;margin-left:2.127659574468085%;*margin-left:2.074468085106383%;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.row-fluid [class*="span"]:first-child{margin-left:0}.row-fluid .span12{width:100%;*width:99.94680851063829%}.row-fluid 
.span11{width:91.48936170212765%;*width:91.43617021276594%}.row-fluid .span10{width:82.97872340425532%;*width:82.92553191489361%}.row-fluid .span9{width:74.46808510638297%;*width:74.41489361702126%}.row-fluid .span8{width:65.95744680851064%;*width:65.90425531914893%}.row-fluid .span7{width:57.44680851063829%;*width:57.39361702127659%}.row-fluid .span6{width:48.93617021276595%;*width:48.88297872340425%}.row-fluid .span5{width:40.42553191489362%;*width:40.37234042553192%}.row-fluid .span4{width:31.914893617021278%;*width:31.861702127659576%}.row-fluid .span3{width:23.404255319148934%;*width:23.351063829787233%}.row-fluid .span2{width:14.893617021276595%;*width:14.840425531914894%}.row-fluid .span1{width:6.382978723404255%;*width:6.329787234042553%}.row-fluid .offset12{margin-left:104.25531914893617%;*margin-left:104.14893617021275%}.row-fluid .offset12:first-child{margin-left:102.12765957446808%;*margin-left:102.02127659574467%}.row-fluid .offset11{margin-left:95.74468085106382%;*margin-left:95.6382978723404%}.row-fluid .offset11:first-child{margin-left:93.61702127659574%;*margin-left:93.51063829787232%}.row-fluid .offset10{margin-left:87.23404255319149%;*margin-left:87.12765957446807%}.row-fluid .offset10:first-child{margin-left:85.1063829787234%;*margin-left:84.99999999999999%}.row-fluid .offset9{margin-left:78.72340425531914%;*margin-left:78.61702127659572%}.row-fluid .offset9:first-child{margin-left:76.59574468085106%;*margin-left:76.48936170212764%}.row-fluid .offset8{margin-left:70.2127659574468%;*margin-left:70.10638297872339%}.row-fluid .offset8:first-child{margin-left:68.08510638297872%;*margin-left:67.9787234042553%}.row-fluid .offset7{margin-left:61.70212765957446%;*margin-left:61.59574468085106%}.row-fluid .offset7:first-child{margin-left:59.574468085106375%;*margin-left:59.46808510638297%}.row-fluid .offset6{margin-left:53.191489361702125%;*margin-left:53.085106382978715%}.row-fluid .offset6:first-child{margin-left:51.063829787234035%;*margin-left:50.95744680851063%}.row-fluid .offset5{margin-left:44.68085106382979%;*margin-left:44.57446808510638%}.row-fluid .offset5:first-child{margin-left:42.5531914893617%;*margin-left:42.4468085106383%}.row-fluid .offset4{margin-left:36.170212765957444%;*margin-left:36.06382978723405%}.row-fluid .offset4:first-child{margin-left:34.04255319148936%;*margin-left:33.93617021276596%}.row-fluid .offset3{margin-left:27.659574468085104%;*margin-left:27.5531914893617%}.row-fluid .offset3:first-child{margin-left:25.53191489361702%;*margin-left:25.425531914893618%}.row-fluid .offset2{margin-left:19.148936170212764%;*margin-left:19.04255319148936%}.row-fluid .offset2:first-child{margin-left:17.02127659574468%;*margin-left:16.914893617021278%}.row-fluid .offset1{margin-left:10.638297872340425%;*margin-left:10.53191489361702%}.row-fluid .offset1:first-child{margin-left:8.51063829787234%;*margin-left:8.404255319148938%}[class*="span"].hide,.row-fluid [class*="span"].hide{display:none}[class*="span"].pull-right,.row-fluid [class*="span"].pull-right{float:right}.container{margin-right:auto;margin-left:auto;*zoom:1}.container:before,.container:after{display:table;line-height:0;content:""}.container:after{clear:both}.container-fluid{padding-right:20px;padding-left:20px;*zoom:1}.container-fluid:before,.container-fluid:after{display:table;line-height:0;content:""}.container-fluid:after{clear:both}p{margin:0 0 
10px}.lead{margin-bottom:20px;font-size:20px;font-weight:200;line-height:30px}small{font-size:85%}strong{font-weight:bold}em{font-style:italic}cite{font-style:normal}.muted{color:#999}h1,h2,h3,h4,h5,h6{margin:10px 0;font-family:inherit;font-weight:bold;line-height:1;color:inherit;text-rendering:optimizelegibility}h1 small,h2 small,h3 small,h4 small,h5 small,h6 small{font-weight:normal;line-height:1;color:#999}h1{font-size:36px;line-height:40px}h2{font-size:30px;line-height:40px}h3{font-size:24px;line-height:40px}h4{font-size:18px;line-height:20px}h5{font-size:14px;line-height:20px}h6{font-size:12px;line-height:20px}h1 small{font-size:24px}h2 small{font-size:18px}h3 small{font-size:14px}h4 small{font-size:14px}.page-header{padding-bottom:9px;margin:20px 0 30px;border-bottom:1px solid #eee}ul,ol{padding:0;margin:0 0 10px 25px}ul ul,ul ol,ol ol,ol ul{margin-bottom:0}li{line-height:20px}ul.unstyled,ol.unstyled{margin-left:0;list-style:none}dl{margin-bottom:20px}dt,dd{line-height:20px}dt{font-weight:bold}dd{margin-left:10px}.dl-horizontal dt{float:left;width:120px;overflow:hidden;clear:left;text-align:right;text-overflow:ellipsis;white-space:nowrap}.dl-horizontal dd{margin-left:130px}hr{margin:20px 0;border:0;border-top:1px solid #eee;border-bottom:1px solid #fff}abbr[title]{cursor:help;border-bottom:1px dotted #999}abbr.initialism{font-size:90%;text-transform:uppercase}blockquote{padding:0 0 0 15px;margin:0 0 20px;border-left:5px solid #eee}blockquote p{margin-bottom:0;font-size:16px;font-weight:300;line-height:25px}blockquote small{display:block;line-height:20px;color:#999}blockquote small:before{content:'\2014 \00A0'}blockquote.pull-right{float:right;padding-right:15px;padding-left:0;border-right:5px solid #eee;border-left:0}blockquote.pull-right p,blockquote.pull-right small{text-align:right}blockquote.pull-right small:before{content:''}blockquote.pull-right small:after{content:'\00A0 \2014'}q:before,q:after,blockquote:before,blockquote:after{content:""}address{display:block;margin-bottom:20px;font-style:normal;line-height:20px}code,pre{padding:0 3px 2px;font-family:Monaco,Menlo,Consolas,"Courier New",monospace;font-size:12px;color:#333;-webkit-border-radius:3px;-moz-border-radius:3px;border-radius:3px}code{padding:2px 4px;color:#d14;background-color:#f7f7f9;border:1px solid #e1e1e8}pre{display:block;padding:9.5px;margin:0 0 10px;font-size:13px;line-height:20px;word-break:break-all;word-wrap:break-word;white-space:pre;white-space:pre-wrap;background-color:#f5f5f5;border:1px solid #ccc;border:1px solid rgba(0,0,0,0.15);-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}pre.prettyprint{margin-bottom:20px}pre code{padding:0;color:inherit;background-color:transparent;border:0}.pre-scrollable{max-height:340px;overflow-y:scroll}form{margin:0 0 20px}fieldset{padding:0;margin:0;border:0}legend{display:block;width:100%;padding:0;margin-bottom:20px;font-size:21px;line-height:40px;color:#333;border:0;border-bottom:1px solid #e5e5e5}legend small{font-size:15px;color:#999}label,input,button,select,textarea{font-size:14px;font-weight:normal;line-height:20px}input,button,select,textarea{font-family:"Helvetica 
Neue",Helvetica,Arial,sans-serif}label{display:block;margin-bottom:5px}select,textarea,input[type="text"],input[type="password"],input[type="datetime"],input[type="datetime-local"],input[type="date"],input[type="month"],input[type="time"],input[type="week"],input[type="number"],input[type="email"],input[type="url"],input[type="search"],input[type="tel"],input[type="color"],.uneditable-input{display:inline-block;height:20px;padding:4px 6px;margin-bottom:9px;font-size:14px;line-height:20px;color:#555;-webkit-border-radius:3px;-moz-border-radius:3px;border-radius:3px}input,textarea{width:210px}textarea{height:auto}textarea,input[type="text"],input[type="password"],input[type="datetime"],input[type="datetime-local"],input[type="date"],input[type="month"],input[type="time"],input[type="week"],input[type="number"],input[type="email"],input[type="url"],input[type="search"],input[type="tel"],input[type="color"],.uneditable-input{background-color:#fff;border:1px solid #ccc;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);-webkit-transition:border linear .2s,box-shadow linear .2s;-moz-transition:border linear .2s,box-shadow linear .2s;-o-transition:border linear .2s,box-shadow linear .2s;transition:border linear .2s,box-shadow linear .2s}textarea:focus,input[type="text"]:focus,input[type="password"]:focus,input[type="datetime"]:focus,input[type="datetime-local"]:focus,input[type="date"]:focus,input[type="month"]:focus,input[type="time"]:focus,input[type="week"]:focus,input[type="number"]:focus,input[type="email"]:focus,input[type="url"]:focus,input[type="search"]:focus,input[type="tel"]:focus,input[type="color"]:focus,.uneditable-input:focus{border-color:rgba(82,168,236,0.8);outline:0;outline:thin dotted \9;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 8px rgba(82,168,236,0.6);-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 8px rgba(82,168,236,0.6);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 8px rgba(82,168,236,0.6)}input[type="radio"],input[type="checkbox"]{margin:4px 0 0;margin-top:1px \9;*margin-top:0;line-height:normal;cursor:pointer}input[type="file"],input[type="image"],input[type="submit"],input[type="reset"],input[type="button"],input[type="radio"],input[type="checkbox"]{width:auto}select,input[type="file"]{height:30px;*margin-top:4px;line-height:30px}select{width:220px;background-color:#fff;border:1px solid #bbb}select[multiple],select[size]{height:auto}select:focus,input[type="file"]:focus,input[type="radio"]:focus,input[type="checkbox"]:focus{outline:thin dotted #333;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}.uneditable-input,.uneditable-textarea{color:#999;cursor:not-allowed;background-color:#fcfcfc;border-color:#ccc;-webkit-box-shadow:inset 0 1px 2px rgba(0,0,0,0.025);-moz-box-shadow:inset 0 1px 2px rgba(0,0,0,0.025);box-shadow:inset 0 1px 2px rgba(0,0,0,0.025)}.uneditable-input{overflow:hidden;white-space:nowrap}.uneditable-textarea{width:auto;height:auto}input:-moz-placeholder,textarea:-moz-placeholder{color:#999}input:-ms-input-placeholder,textarea:-ms-input-placeholder{color:#999}input::-webkit-input-placeholder,textarea::-webkit-input-placeholder{color:#999}.radio,.checkbox{min-height:18px;padding-left:18px}.radio input[type="radio"],.checkbox 
input[type="checkbox"]{float:left;margin-left:-18px}.controls>.radio:first-child,.controls>.checkbox:first-child{padding-top:5px}.radio.inline,.checkbox.inline{display:inline-block;padding-top:5px;margin-bottom:0;vertical-align:middle}.radio.inline+.radio.inline,.checkbox.inline+.checkbox.inline{margin-left:10px}.input-mini{width:60px}.input-small{width:90px}.input-medium{width:150px}.input-large{width:210px}.input-xlarge{width:270px}.input-xxlarge{width:530px}input[class*="span"],select[class*="span"],textarea[class*="span"],.uneditable-input[class*="span"],.row-fluid input[class*="span"],.row-fluid select[class*="span"],.row-fluid textarea[class*="span"],.row-fluid .uneditable-input[class*="span"]{float:none;margin-left:0}.input-append input[class*="span"],.input-append .uneditable-input[class*="span"],.input-prepend input[class*="span"],.input-prepend .uneditable-input[class*="span"],.row-fluid input[class*="span"],.row-fluid select[class*="span"],.row-fluid textarea[class*="span"],.row-fluid .uneditable-input[class*="span"],.row-fluid .input-prepend [class*="span"],.row-fluid .input-append [class*="span"]{display:inline-block}input,textarea,.uneditable-input{margin-left:0}.controls-row [class*="span"]+[class*="span"]{margin-left:20px}input.span12,textarea.span12,.uneditable-input.span12{width:926px}input.span11,textarea.span11,.uneditable-input.span11{width:846px}input.span10,textarea.span10,.uneditable-input.span10{width:766px}input.span9,textarea.span9,.uneditable-input.span9{width:686px}input.span8,textarea.span8,.uneditable-input.span8{width:606px}input.span7,textarea.span7,.uneditable-input.span7{width:526px}input.span6,textarea.span6,.uneditable-input.span6{width:446px}input.span5,textarea.span5,.uneditable-input.span5{width:366px}input.span4,textarea.span4,.uneditable-input.span4{width:286px}input.span3,textarea.span3,.uneditable-input.span3{width:206px}input.span2,textarea.span2,.uneditable-input.span2{width:126px}input.span1,textarea.span1,.uneditable-input.span1{width:46px}.controls-row{*zoom:1}.controls-row:before,.controls-row:after{display:table;line-height:0;content:""}.controls-row:after{clear:both}.controls-row [class*="span"]{float:left}input[disabled],select[disabled],textarea[disabled],input[readonly],select[readonly],textarea[readonly]{cursor:not-allowed;background-color:#eee}input[type="radio"][disabled],input[type="checkbox"][disabled],input[type="radio"][readonly],input[type="checkbox"][readonly]{background-color:transparent}.control-group.warning>label,.control-group.warning .help-block,.control-group.warning .help-inline{color:#c09853}.control-group.warning .checkbox,.control-group.warning .radio,.control-group.warning input,.control-group.warning select,.control-group.warning textarea{color:#c09853;border-color:#c09853;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075)}.control-group.warning .checkbox:focus,.control-group.warning .radio:focus,.control-group.warning input:focus,.control-group.warning select:focus,.control-group.warning textarea:focus{border-color:#a47e3c;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #dbc59e;-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #dbc59e;box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #dbc59e}.control-group.warning .input-prepend .add-on,.control-group.warning .input-append .add-on{color:#c09853;background-color:#fcf8e3;border-color:#c09853}.control-group.error>label,.control-group.error 
.help-block,.control-group.error .help-inline{color:#b94a48}.control-group.error .checkbox,.control-group.error .radio,.control-group.error input,.control-group.error select,.control-group.error textarea{color:#b94a48;border-color:#b94a48;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075)}.control-group.error .checkbox:focus,.control-group.error .radio:focus,.control-group.error input:focus,.control-group.error select:focus,.control-group.error textarea:focus{border-color:#953b39;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #d59392;-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #d59392;box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #d59392}.control-group.error .input-prepend .add-on,.control-group.error .input-append .add-on{color:#b94a48;background-color:#f2dede;border-color:#b94a48}.control-group.success>label,.control-group.success .help-block,.control-group.success .help-inline{color:#468847}.control-group.success .checkbox,.control-group.success .radio,.control-group.success input,.control-group.success select,.control-group.success textarea{color:#468847;border-color:#468847;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075)}.control-group.success .checkbox:focus,.control-group.success .radio:focus,.control-group.success input:focus,.control-group.success select:focus,.control-group.success textarea:focus{border-color:#356635;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #7aba7b;-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #7aba7b;box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #7aba7b}.control-group.success .input-prepend .add-on,.control-group.success .input-append .add-on{color:#468847;background-color:#dff0d8;border-color:#468847}input:focus:required:invalid,textarea:focus:required:invalid,select:focus:required:invalid{color:#b94a48;border-color:#ee5f5b}input:focus:required:invalid:focus,textarea:focus:required:invalid:focus,select:focus:required:invalid:focus{border-color:#e9322d;-webkit-box-shadow:0 0 6px #f8b9b7;-moz-box-shadow:0 0 6px #f8b9b7;box-shadow:0 0 6px #f8b9b7}.form-actions{padding:19px 20px 20px;margin-top:20px;margin-bottom:20px;background-color:#f5f5f5;border-top:1px solid #e5e5e5;*zoom:1}.form-actions:before,.form-actions:after{display:table;line-height:0;content:""}.form-actions:after{clear:both}.help-block,.help-inline{color:#595959}.help-block{display:block;margin-bottom:10px}.help-inline{display:inline-block;*display:inline;padding-left:5px;vertical-align:middle;*zoom:1}.input-append,.input-prepend{margin-bottom:5px;font-size:0;white-space:nowrap}.input-append input,.input-prepend input,.input-append select,.input-prepend select,.input-append .uneditable-input,.input-prepend .uneditable-input{position:relative;margin-bottom:0;*margin-left:0;font-size:14px;vertical-align:top;-webkit-border-radius:0 3px 3px 0;-moz-border-radius:0 3px 3px 0;border-radius:0 3px 3px 0}.input-append input:focus,.input-prepend input:focus,.input-append select:focus,.input-prepend select:focus,.input-append .uneditable-input:focus,.input-prepend .uneditable-input:focus{z-index:2}.input-append .add-on,.input-prepend .add-on{display:inline-block;width:auto;height:20px;min-width:16px;padding:4px 5px;font-size:14px;font-weight:normal;line-height:20px;text-align:center;text-shadow:0 1px 0 #fff;background-color:#eee;border:1px solid 
#ccc}.input-append .add-on,.input-prepend .add-on,.input-append .btn,.input-prepend .btn{margin-left:-1px;vertical-align:top;-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.input-append .active,.input-prepend .active{background-color:#a9dba9;border-color:#46a546}.input-prepend .add-on,.input-prepend .btn{margin-right:-1px}.input-prepend .add-on:first-child,.input-prepend .btn:first-child{-webkit-border-radius:3px 0 0 3px;-moz-border-radius:3px 0 0 3px;border-radius:3px 0 0 3px}.input-append input,.input-append select,.input-append .uneditable-input{-webkit-border-radius:3px 0 0 3px;-moz-border-radius:3px 0 0 3px;border-radius:3px 0 0 3px}.input-append .add-on:last-child,.input-append .btn:last-child{-webkit-border-radius:0 3px 3px 0;-moz-border-radius:0 3px 3px 0;border-radius:0 3px 3px 0}.input-prepend.input-append input,.input-prepend.input-append select,.input-prepend.input-append .uneditable-input{-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.input-prepend.input-append .add-on:first-child,.input-prepend.input-append .btn:first-child{margin-right:-1px;-webkit-border-radius:3px 0 0 3px;-moz-border-radius:3px 0 0 3px;border-radius:3px 0 0 3px}.input-prepend.input-append .add-on:last-child,.input-prepend.input-append .btn:last-child{margin-left:-1px;-webkit-border-radius:0 3px 3px 0;-moz-border-radius:0 3px 3px 0;border-radius:0 3px 3px 0}input.search-query{padding-right:14px;padding-right:4px \9;padding-left:14px;padding-left:4px \9;margin-bottom:0;-webkit-border-radius:15px;-moz-border-radius:15px;border-radius:15px}.form-search .input-append .search-query,.form-search .input-prepend .search-query{-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.form-search .input-append .search-query{-webkit-border-radius:14px 0 0 14px;-moz-border-radius:14px 0 0 14px;border-radius:14px 0 0 14px}.form-search .input-append .btn{-webkit-border-radius:0 14px 14px 0;-moz-border-radius:0 14px 14px 0;border-radius:0 14px 14px 0}.form-search .input-prepend .search-query{-webkit-border-radius:0 14px 14px 0;-moz-border-radius:0 14px 14px 0;border-radius:0 14px 14px 0}.form-search .input-prepend .btn{-webkit-border-radius:14px 0 0 14px;-moz-border-radius:14px 0 0 14px;border-radius:14px 0 0 14px}.form-search input,.form-inline input,.form-horizontal input,.form-search textarea,.form-inline textarea,.form-horizontal textarea,.form-search select,.form-inline select,.form-horizontal select,.form-search .help-inline,.form-inline .help-inline,.form-horizontal .help-inline,.form-search .uneditable-input,.form-inline .uneditable-input,.form-horizontal .uneditable-input,.form-search .input-prepend,.form-inline .input-prepend,.form-horizontal .input-prepend,.form-search .input-append,.form-inline .input-append,.form-horizontal .input-append{display:inline-block;*display:inline;margin-bottom:0;vertical-align:middle;*zoom:1}.form-search .hide,.form-inline .hide,.form-horizontal .hide{display:none}.form-search label,.form-inline label,.form-search .btn-group,.form-inline .btn-group{display:inline-block}.form-search .input-append,.form-inline .input-append,.form-search .input-prepend,.form-inline .input-prepend{margin-bottom:0}.form-search .radio,.form-search .checkbox,.form-inline .radio,.form-inline .checkbox{padding-left:0;margin-bottom:0;vertical-align:middle}.form-search .radio input[type="radio"],.form-search .checkbox input[type="checkbox"],.form-inline .radio input[type="radio"],.form-inline .checkbox 
input[type="checkbox"]{float:left;margin-right:3px;margin-left:0}.control-group{margin-bottom:10px}legend+.control-group{margin-top:20px;-webkit-margin-top-collapse:separate}.form-horizontal .control-group{margin-bottom:20px;*zoom:1}.form-horizontal .control-group:before,.form-horizontal .control-group:after{display:table;line-height:0;content:""}.form-horizontal .control-group:after{clear:both}.form-horizontal .control-label{float:left;width:140px;padding-top:5px;text-align:right}.form-horizontal .controls{*display:inline-block;*padding-left:20px;margin-left:160px;*margin-left:0}.form-horizontal .controls:first-child{*padding-left:160px}.form-horizontal .help-block{margin-top:10px;margin-bottom:0}.form-horizontal .form-actions{padding-left:160px}table{max-width:100%;background-color:transparent;border-collapse:collapse;border-spacing:0}.table{width:100%;margin-bottom:20px}.table th,.table td{padding:8px;line-height:20px;text-align:left;vertical-align:top;border-top:1px solid #ddd}.table th{font-weight:bold}.table thead th{vertical-align:bottom}.table caption+thead tr:first-child th,.table caption+thead tr:first-child td,.table colgroup+thead tr:first-child th,.table colgroup+thead tr:first-child td,.table thead:first-child tr:first-child th,.table thead:first-child tr:first-child td{border-top:0}.table tbody+tbody{border-top:2px solid #ddd}.table-condensed th,.table-condensed td{padding:4px 5px}.table-bordered{border:1px solid #ddd;border-collapse:separate;*border-collapse:collapse;border-left:0;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}.table-bordered th,.table-bordered td{border-left:1px solid #ddd}.table-bordered caption+thead tr:first-child th,.table-bordered caption+tbody tr:first-child th,.table-bordered caption+tbody tr:first-child td,.table-bordered colgroup+thead tr:first-child th,.table-bordered colgroup+tbody tr:first-child th,.table-bordered colgroup+tbody tr:first-child td,.table-bordered thead:first-child tr:first-child th,.table-bordered tbody:first-child tr:first-child th,.table-bordered tbody:first-child tr:first-child td{border-top:0}.table-bordered thead:first-child tr:first-child th:first-child,.table-bordered tbody:first-child tr:first-child td:first-child{-webkit-border-top-left-radius:4px;border-top-left-radius:4px;-moz-border-radius-topleft:4px}.table-bordered thead:first-child tr:first-child th:last-child,.table-bordered tbody:first-child tr:first-child td:last-child{-webkit-border-top-right-radius:4px;border-top-right-radius:4px;-moz-border-radius-topright:4px}.table-bordered thead:last-child tr:last-child th:first-child,.table-bordered tbody:last-child tr:last-child td:first-child,.table-bordered tfoot:last-child tr:last-child td:first-child{-webkit-border-radius:0 0 0 4px;-moz-border-radius:0 0 0 4px;border-radius:0 0 0 4px;-webkit-border-bottom-left-radius:4px;border-bottom-left-radius:4px;-moz-border-radius-bottomleft:4px}.table-bordered thead:last-child tr:last-child th:last-child,.table-bordered tbody:last-child tr:last-child td:last-child,.table-bordered tfoot:last-child tr:last-child td:last-child{-webkit-border-bottom-right-radius:4px;border-bottom-right-radius:4px;-moz-border-radius-bottomright:4px}.table-bordered caption+thead tr:first-child th:first-child,.table-bordered caption+tbody tr:first-child td:first-child,.table-bordered colgroup+thead tr:first-child th:first-child,.table-bordered colgroup+tbody tr:first-child 
td:first-child{-webkit-border-top-left-radius:4px;border-top-left-radius:4px;-moz-border-radius-topleft:4px}.table-bordered caption+thead tr:first-child th:last-child,.table-bordered caption+tbody tr:first-child td:last-child,.table-bordered colgroup+thead tr:first-child th:last-child,.table-bordered colgroup+tbody tr:first-child td:last-child{-webkit-border-top-right-radius:4px;border-top-right-radius:4px;-moz-border-right-topleft:4px}.table-striped tbody tr:nth-child(odd) td,.table-striped tbody tr:nth-child(odd) th{background-color:#f9f9f9}.table-hover tbody tr:hover td,.table-hover tbody tr:hover th{background-color:#f5f5f5}table [class*=span],.row-fluid table [class*=span]{display:table-cell;float:none;margin-left:0}table .span1{float:none;width:44px;margin-left:0}table .span2{float:none;width:124px;margin-left:0}table .span3{float:none;width:204px;margin-left:0}table .span4{float:none;width:284px;margin-left:0}table .span5{float:none;width:364px;margin-left:0}table .span6{float:none;width:444px;margin-left:0}table .span7{float:none;width:524px;margin-left:0}table .span8{float:none;width:604px;margin-left:0}table .span9{float:none;width:684px;margin-left:0}table .span10{float:none;width:764px;margin-left:0}table .span11{float:none;width:844px;margin-left:0}table .span12{float:none;width:924px;margin-left:0}table .span13{float:none;width:1004px;margin-left:0}table .span14{float:none;width:1084px;margin-left:0}table .span15{float:none;width:1164px;margin-left:0}table .span16{float:none;width:1244px;margin-left:0}table .span17{float:none;width:1324px;margin-left:0}table .span18{float:none;width:1404px;margin-left:0}table .span19{float:none;width:1484px;margin-left:0}table .span20{float:none;width:1564px;margin-left:0}table .span21{float:none;width:1644px;margin-left:0}table .span22{float:none;width:1724px;margin-left:0}table .span23{float:none;width:1804px;margin-left:0}table .span24{float:none;width:1884px;margin-left:0}.table tbody tr.success td{background-color:#dff0d8}.table tbody tr.error td{background-color:#f2dede}.table tbody tr.info td{background-color:#d9edf7}[class^="icon-"],[class*=" icon-"]{display:inline-block;width:32px;height:32px;margin-top:1px;margin-right:.3em;line-height:32px;vertical-align:text-top;background-image:url("../img/glyphicons.png");background-position:32px 32px;background-repeat:no-repeat}.icon-white,.nav>.active>a>[class^="icon-"],.nav>.active>a>[class*=" icon-"],.dropdown-menu>li>a:hover>[class^="icon-"],.dropdown-menu>li>a:hover>[class*=" icon-"],.dropdown-menu>.active>a>[class^="icon-"],.dropdown-menu>.active>a>[class*=" icon-"]{background-image:url("../img/glyphicons-white.png")}.icon-glass{background-position:0 0}.icon-leaf{background-position:-56px 0}.icon-dog{background-position:-112px 0}.icon-user{background-position:-170px 0}.icon-girl{background-position:-225px 0}.icon-car{background-position:-279px 0}.icon-user-add{background-position:-337px 0}.icon-user-remove{background-position:-397px 0}.icon-film{background-position:-455px 0}.icon-magic{background-position:-509px 0}.icon-envelope{background-position:-565px 0}.icon-camera{background-position:-621px 0}.icon-heart{background-position:-679px 0}.icon-beach-umbrella{background-position:-735px 0}.icon-train{background-position:-792px 0}.icon-print{background-position:-844px 0}.icon-bin{background-position:-900px 0}.icon-music{background-position:-952px 0}.icon-note{background-position:-1005px 0}.icon-cogwheel{background-position:-1055px 0}.icon-home{background-position:-1111px 
0}.icon-snowflake{background-position:-1170px 0}.icon-fire{background-position:-1230px 0}.icon-cogwheels{background-position:-1282px 0}.icon-parents{background-position:-1340px 0}.icon-binoculars{background-position:-1404px 0}.icon-road{background-position:-1460px 0}.icon-search{background-position:-1520px 0}.icon-cars{background-position:-1576px 0}.icon-notes-2{background-position:-1644px 0}.icon-pencil{background-position:-1696px 0}.icon-bus{background-position:-1753px 0}.icon-wifi-alt{background-position:-1817px 0}.icon-luggage{background-position:-1875px 0}.icon-old-man{background-position:-1927px 0}.icon-woman{background-position:0 -60px}.icon-file{background-position:-54px -60px}.icon-credit{background-position:-105px -60px}.icon-airplane{background-position:-163px -60px}.icon-notes{background-position:-219px -60px}.icon-stats{background-position:-271px -60px}.icon-charts{background-position:-329px -60px}.icon-pie-chart{background-position:-388px -60px}.icon-group{background-position:-446px -60px}.icon-keys{background-position:-513px -60px}.icon-calendar{background-position:-569px -60px}.icon-router{background-position:-624px -60px}.icon-camera-small{background-position:-683px -60px}.icon-dislikes{background-position:-737px -60px}.icon-star{background-position:-795px -60px}.icon-link{background-position:-852px -60px}.icon-eye-open{background-position:-905px -60px}.icon-eye-close{background-position:-968px -60px}.icon-alarm{background-position:-1031px -60px}.icon-clock{background-position:-1091px -60px}.icon-stopwatch{background-position:-1147px -60px}.icon-projector{background-position:-1202px -60px}.icon-history{background-position:-1262px -60px}.icon-truck{background-position:-1319px -60px}.icon-cargo{background-position:-1383px -60px}.icon-compass{background-position:-1440px -60px}.icon-keynote{background-position:-1496px -60px}.icon-attach{background-position:-1548px -60px}.icon-power{background-position:-1606px -60px}.icon-lightbulb{background-position:-1660px -60px}.icon-tag{background-position:-1712px -60px}.icon-tags{background-position:-1768px -60px}.icon-cleaning{background-position:-1830px -60px}.icon-ruler{background-position:-1886px -60px}.icon-gift{background-position:-1945px -60px}.icon-umbrella{background-position:0 -122px}.icon-book{background-position:-58px -122px}.icon-bookmark{background-position:-112px -122px}.icon-signal{background-position:-160px -122px}.icon-cup{background-position:-223px -122px}.icon-stroller{background-position:-277px -122px}.icon-headphones{background-position:-334px -122px}.icon-headset{background-position:-390px -122px}.icon-warning-sign{background-position:-446px -122px}.icon-signal{background-position:-507px -122px}.icon-retweet{background-position:-563px -122px}.icon-refresh{background-position:-625px -122px}.icon-roundabout{background-position:-682px -122px}.icon-random{background-position:-741px -122px}.icon-heat{background-position:-801px -122px}.icon-repeat{background-position:-862px -122px}.icon-display{background-position:-918px -122px}.icon-log-book{background-position:-978px -122px}.icon-adress-book{background-position:-1032px -122px}.icon-magnet{background-position:-1086px -122px}.icon-table{background-position:-1139px -122px}.icon-adjust{background-position:-1195px -122px}.icon-tint{background-position:-1253px -122px}.icon-crop{background-position:-1308px -122px}.icon-vector-path-square{background-position:-1366px -122px}.icon-vector-path-circle{background-position:-1422px 
-122px}.icon-vector-path-polygon{background-position:-1478px -122px}.icon-vector-path-line{background-position:-1536px -122px}.icon-vector-path-curve{background-position:-1592px -122px}.icon-vector-path-all{background-position:-1648px -122px}.icon-font{background-position:-1704px -122px}.icon-italic{background-position:-1763px -122px}.icon-bold{background-position:-1809px -122px}.icon-text-underline{background-position:-1860px -122px}.icon-text-strike{background-position:-1912px -122px}.icon-text-height{background-position:-1964px -122px}.icon-text-width{background-position:0 -184px}.icon-text-resize{background-position:-54px -184px}.icon-left-indent{background-position:-112px -184px}.icon-right-indent{background-position:-168px -184px}.icon-align-left{background-position:-224px -184px}.icon-align-center{background-position:-280px -184px}.icon-align-right{background-position:-336px -184px}.icon-justify{background-position:-392px -184px}.icon-list{background-position:-448px -184px}.icon-text-smaller{background-position:-504px -184px}.icon-text-bigger{background-position:-558px -184px}.icon-embed{background-position:-614px -184px}.icon-embed-close{background-position:-676px -184px}.icon-adjust{background-position:-738px -184px}.icon-message-full{background-position:-793px -184px}.icon-message-empty{background-position:-849px -184px}.icon-message-in{background-position:-905px -184px}.icon-message-out{background-position:-961px -184px}.icon-message-plus{background-position:-1017px -184px}.icon-message-minus{background-position:-1078px -184px}.icon-message-ban{background-position:-1139px -184px}.icon-message-flag{background-position:-1200px -184px}.icon-message-lock{background-position:-1259px -184px}.icon-message-new{background-position:-1319px -184px}.icon-inbox{background-position:-1379px -184px}.icon-inbox-plus{background-position:-1435px -184px}.icon-inbox-minus{background-position:-1494px -184px}.icon-inbox-lock{background-position:-1553px -184px}.icon-inbox-in{background-position:-1611px -184px}.icon-inbox-out{background-position:-1667px -184px}.icon-computer-locked{background-position:-1723px -184px}.icon-computer-service{background-position:-1783px -184px}.icon-computer-process{background-position:-1843px -184px}.icon-phone{background-position:-1903px -184px}.icon-database-lock{background-position:-1950px -184px}.icon-database-plus{background-position:0 -246px}.icon-database-minus{background-position:-59px -246px}.icon-database-ban{background-position:-118px -246px}.icon-folder-open{background-position:-176px -246px}.icon-folder-plus{background-position:-238px -246px}.icon-folder-minus{background-position:-299px -246px}.icon-folder-lock{background-position:-360px -246px}.icon-folder-flag{background-position:-420px -246px}.icon-folder-new{background-position:-479px -246px}.icon-check{background-position:-539px -246px}.icon-edit{background-position:-593px -246px}.icon-new-window{background-position:-649px -246px}.icon-more-windows{background-position:-707px -246px}.icon-show-big-thumbnails{background-position:-762px -246px}.icon-show-thumbnails{background-position:-816px -246px}.icon-show-thumbnails-with-lines{background-position:-870px -246px}.icon-show-lines{background-position:-926px -246px}.icon-playlist{background-position:-982px -246px}.icon-picture{background-position:-1043px -246px}.icon-imac{background-position:-1099px -246px}.icon-macbook{background-position:-1157px -246px}.icon-ipad{background-position:-1217px -246px}.icon-iphone{background-position:-1269px 
-246px}.icon-iphone-transfer{background-position:-1315px -246px}.icon-iphone-exchange{background-position:-1376px -246px}.icon-ipod{background-position:-1437px -246px}.icon-ipod-shuffle{background-position:-1483px -246px}.icon-ear-plugs{background-position:-1530px -246px}.icon-albums{background-position:-1582px -246px}.icon-step-backward{background-position:-1642px -246px}.icon-fast-backward{background-position:-1688px -246px}.icon-rewind{background-position:-1745px -246px}.icon-play{background-position:-1800px -246px}.icon-pause{background-position:-1848px -246px}.icon-stop{background-position:-1890px -246px}.icon-forward{background-position:-1936px -246px}.icon-fast-forward{background-position:0 -308px}.icon-step-forward{background-position:-57px -308px}.icon-eject{background-position:-103px -308px}.icon-facetime-video{background-position:-153px -308px}.icon-download-alt{background-position:-209px -308px}.icon-mute{background-position:-265px -308px}.icon-volume-down{background-position:-319px -308px}.icon-volume-up{background-position:-367px -308px}.icon-screenshot{background-position:-423px -308px}.icon-move{background-position:-482px -308px}.icon-more{background-position:-538px -308px}.icon-brightness-reduce{background-position:-592px -308px}.icon-brightness-increase{background-position:-644px -308px}.icon-circle-plus{background-position:-700px -308px}.icon-circle-minus{background-position:-758px -308px}.icon-circle-remove{background-position:-816px -308px}.icon-circle-ok{background-position:-874px -308px}.icon-circle-question-mark{background-position:-932px -308px}.icon-circle-info{background-position:-990px -308px}.icon-circle-exclamation-mark{background-position:-1048px -308px}.icon-remove{background-position:-1106px -308px}.icon-ok{background-position:-1164px -308px}.icon-ban{background-position:-1222px -308px}.icon-download{background-position:-1280px -308px}.icon-upload{background-position:-1338px -308px}.icon-shopping-cart{background-position:-1396px -308px}.icon-lock{background-position:-1454px -308px}.icon-unlock{background-position:-1507px -308px}.icon-electricity{background-position:-1560px -308px}.icon-ok-2{background-position:-1603px -308px}.icon-remove-2{background-position:-1660px -308px}.icon-cart-out{background-position:-1710px -308px}.icon-cart-in{background-position:-1768px -308px}.icon-left-arrow{background-position:-1826px -308px}.icon-right-arrow{background-position:-1878px -308px}.icon-down-arrow{background-position:-1930px -308px}.icon-up-arrow{background-position:0 -370px}.icon-resize-small{background-position:-50px -370px}.icon-resize-full{background-position:-106px -370px}.icon-circle-arrow-left{background-position:-162px -370px}.icon-circle-arrow-right{background-position:-220px -370px}.icon-circle-arrow-up{background-position:-278px -370px}.icon-circle-arrow-down{background-position:-336px -370px}.icon-play-button{background-position:-394px -370px}.icon-unshare{background-position:-452px -370px}.icon-share{background-position:-508px -370px}.icon-thin-arrow-right{background-position:-564px -370px}.icon-thin-arrow-left{background-position:-611px -370px}.icon-bluetooth{background-position:-658px -370px}.icon-euro{background-position:-704px -370px}.icon-usd{background-position:-758px -370px}.icon-bp{background-position:-807px -370px}.icon-retweet-2{background-position:-856px -370px}.icon-moon{background-position:-921px -370px}.icon-sun{background-position:-975px -370px}.icon-cloud{background-position:-1031px -370px}.icon-direction{background-position:-1090px 
-370px}.icon-brush{background-position:-1148px -370px}.icon-pen{background-position:-1205px -370px}.icon-zoom-in{background-position:-1261px -370px}.icon-zoom-out{background-position:-1318px -370px}.icon-pin{background-position:-1375px -370px}.icon-riflescope{background-position:-1417px -370px}.icon-rotation-lock{background-position:-1474px -370px}.icon-flash{background-position:-1533px -370px}.icon-google-maps{background-position:-1579px -370px}.icon-anchor{background-position:-1626px -370px}.icon-conversation{background-position:-1682px -370px}.icon-chat{background-position:-1739px -370px}.icon-male{background-position:-1795px -370px}.icon-female{background-position:-1849px -370px}.icon-asterisk{background-position:-1897px -370px}.icon-divide{background-position:-1949px -370px}.icon-snorkel-diving{background-position:0 -432px}.icon-scuba-diving{background-position:-59px -432px}.icon-oxygen-bottle{background-position:-118px -432px}.icon-fins{background-position:-172px -432px}.icon-fishes{background-position:-235px -432px}.icon-boat{background-position:-295px -432px}.icon-delete-point{background-position:-351px -432px}.icon-sheriffs-star{background-position:-409px -432px}.icon-qrcode{background-position:-465px -432px}.icon-barcode{background-position:-521px -432px}.icon-pool{background-position:-577px -432px}.icon-buoy{background-position:-633px -432px}.icon-spade{background-position:-689px -432px}.icon-bank{background-position:-745px -432px}.icon-vcard{background-position:-801px -432px}.icon-electrical-plug{background-position:-855px -432px}.icon-flag{background-position:-905px -432px}.icon-credit-card{background-position:-958px -432px}.icon-keyboard-wireless{background-position:-1016px -432px}.icon-keyboard-wired{background-position:-1075px -432px}.icon-shield{background-position:-1134px -432px}.icon-ring{background-position:-1188px -432px}.icon-cake{background-position:-1241px -432px}.icon-drink{background-position:-1295px -432px}.icon-beer{background-position:-1350px -432px}.icon-fast-food{background-position:-1405px -432px}.icon-cutlery{background-position:-1465px -432px}.icon-pizza{background-position:-1510px -432px}.icon-birthday-cake{background-position:-1568px -432px}.icon-tablet{background-position:-1626px -432px}.icon-settings{background-position:-1683px -432px}.icon-bullets{background-position:-1739px -432px}.icon-cardio{background-position:-1798px -432px}.icon-t-shirt{background-position:-1855px -432px}.icon-pants{background-position:-1915px -432px}.icon-sweater{background-position:-1966px -432px}.icon-fabric{background-position:0 -494px}.icon-leather{background-position:-59px -494px}.icon-scissors{background-position:-114px -494px}.icon-podium{background-position:-170px -494px}.icon-skull{background-position:-230px -494px}.icon-celebration{background-position:-284px -494px}.icon-tea-kettle{background-position:-340px -494px}.icon-french-press{background-position:-398px -494px}.icon-coffe-cup{background-position:-453px -494px}.icon-pot{background-position:-510px -494px}.icon-grater{background-position:-569px -494px}.icon-kettle{background-position:-619px -494px}.icon-hospital{background-position:-674px -494px}.icon-hospital-h{background-position:-730px -494px}.icon-microphone{background-position:-786px -494px}.icon-webcam{background-position:-835px -494px}.icon-temple-christianity-church{background-position:-886px -494px}.icon-temple-islam{background-position:-942px -494px}.icon-temple-hindu{background-position:-999px -494px}.icon-temple-buddhist{background-position:-1055px 
-494px}.icon-electrical-socket-eu{background-position:-1115px -494px}.icon-electrical-socket-us{background-position:-1170px -494px}.icon-bomb{background-position:-1225px -494px}.icon-comments{background-position:-1284px -494px}.icon-flower{background-position:-1340px -494px}.icon-baseball{background-position:-1391px -494px}.icon-rugby{background-position:-1448px -494px}.icon-ax{background-position:-1503px -494px}.icon-table-tennis{background-position:-1562px -494px}.icon-bowling{background-position:-1618px -494px}.icon-tree-conifer{background-position:-1674px -494px}.icon-tree-deciduous{background-position:-1727px -494px}.icon-more-items{background-position:-1779px -494px}.icon-sort{background-position:-1832px -494px}.icon-filter{background-position:-1889px -494px}.icon-gamepad{background-position:-1941px -494px}.icon-playing-dices{background-position:0 -556px}.icon-calculator{background-position:-59px -556px}.icon-tie{background-position:-112px -556px}.icon-wallet{background-position:-155px -556px}.icon-share{background-position:-212px -556px}.icon-sampler{background-position:-266px -556px}.icon-piano{background-position:-325px -556px}.icon-web-browser{background-position:-380px -556px}.icon-blog{background-position:-436px -556px}.icon-dashboard{background-position:-489px -556px}.icon-certificate{background-position:-545px -556px}.icon-bell{background-position:-594px -556px}.icon-candle{background-position:-650px -556px}.icon-pin-classic{background-position:-702px -556px}.icon-iphone-shake{background-position:-758px -556px}.icon-pin-flag{background-position:-814px -556px}.icon-turtle{background-position:-876px -556px}.icon-rabbit{background-position:-936px -556px}.icon-globe{background-position:-994px -556px}.icon-briefcase{background-position:-1050px -556px}.icon-hdd{background-position:-1106px -556px}.icon-thumbs-up{background-position:-1162px -556px}.icon-thumbs-down{background-position:-1218px -556px}.icon-hand-right{background-position:-1274px -556px}.icon-hand-left{background-position:-1332px -556px}.icon-hand-up{background-position:-1390px -556px}.icon-hand-down{background-position:-1441px -556px}.icon-fullscreen{background-position:-1492px -556px}.icon-shopping-bag{background-position:-1548px -556px}.icon-book-open{background-position:-1603px -556px}.icon-nameplate{background-position:-1660px -556px}.icon-nameplate-alt{background-position:-1716px -556px}.icon-vases{background-position:-1772px -556px}.icon-announcement{background-position:-1828px -556px}.icon-dumbbell{background-position:-1885px -556px}.icon-suitcase{background-position:-1943px -556px}.icon-file-import{background-position:0 -618px}.icon-file-export{background-position:-54px -618px}.icon-pinterest{background-position:-109px -618px}.icon-dropbox{background-position:-165px -618px}.icon-google-alt{background-position:-221px -618px}.icon-jolicloud{background-position:-277px -618px}.icon-yahoo{background-position:-333px -618px}.icon-blogger{background-position:-389px -618px}.icon-picasa{background-position:-445px -618px}.icon-amazon{background-position:-501px -618px}.icon-tumblr{background-position:-557px -618px}.icon-wordpress{background-position:-613px -618px}.icon-instapaper{background-position:-669px -618px}.icon-evernote{background-position:-725px -618px}.icon-xing{background-position:-781px -618px}.icon-zootool{background-position:-837px -618px}.icon-dribbble{background-position:-893px -618px}.icon-deviantart{background-position:-949px -618px}.icon-read-it-later{background-position:-1005px 
-618px}.icon-linked-in{background-position:-1061px -618px}.icon-forrst{background-position:-1117px -618px}.icon-pinboard{background-position:-1173px -618px}.icon-behance{background-position:-1229px -618px}.icon-github{background-position:-1285px -618px}.icon-youtube{background-position:-1341px -618px}.icon-skitch{background-position:-1397px -618px}.icon-4square{background-position:-1453px -618px}.icon-quora{background-position:-1509px -618px}.icon-google-plus{background-position:-1565px -618px}.icon-spotify{background-position:-1621px -618px}.icon-stumbleupon{background-position:-1677px -618px}.icon-readability{background-position:-1733px -618px}.icon-facebook{background-position:-1789px -618px}.icon-twitter-t{background-position:-1845px -618px}.icon-twitter{background-position:-1901px -618px}.icon-buzz{background-position:-1957px -618px}.icon-vimeo{background-position:0 -680px}.icon-flickr{background-position:-56px -680px}.icon-last-fm{background-position:-112px -680px}.icon-rss{background-position:-168px -680px}.icon-skype{background-position:-224px -680px}.icon-e-mail{background-position:-280px -680px}.dropup,.dropdown{position:relative}.dropdown-toggle{*margin-bottom:-3px}.dropdown-toggle:active,.open .dropdown-toggle{outline:0}.caret{display:inline-block;width:0;height:0;vertical-align:top;border-top:4px solid #000;border-right:4px solid transparent;border-left:4px solid transparent;content:""}.dropdown .caret{margin-top:8px;margin-left:2px}.dropdown-menu{position:absolute;top:100%;left:0;z-index:1000;display:none;float:left;min-width:160px;padding:5px 0;margin:2px 0 0;list-style:none;background-color:#fff;border:1px solid #ccc;border:1px solid rgba(0,0,0,0.2);*border-right-width:2px;*border-bottom-width:2px;-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px;-webkit-box-shadow:0 5px 10px rgba(0,0,0,0.2);-moz-box-shadow:0 5px 10px rgba(0,0,0,0.2);box-shadow:0 5px 10px rgba(0,0,0,0.2);-webkit-background-clip:padding-box;-moz-background-clip:padding;background-clip:padding-box}.dropdown-menu.pull-right{right:0;left:auto}.dropdown-menu .divider{*width:100%;height:1px;margin:9px 1px;*margin:-5px 0 5px;overflow:hidden;background-color:#e5e5e5;border-bottom:1px solid #fff}.dropdown-menu a{display:block;padding:3px 20px;clear:both;font-weight:normal;line-height:20px;color:#333;white-space:nowrap}.dropdown-menu li>a:hover,.dropdown-menu li>a:focus,.dropdown-submenu:hover>a{color:#fff;text-decoration:none;background-color:#08c;background-color:#0081c2;background-image:-moz-linear-gradient(top,#08c,#0077b3);background-image:-webkit-gradient(linear,0 0,0 100%,from(#08c),to(#0077b3));background-image:-webkit-linear-gradient(top,#08c,#0077b3);background-image:-o-linear-gradient(top,#08c,#0077b3);background-image:linear-gradient(to bottom,#08c,#0077b3);background-repeat:repeat-x;filter:progid:dximagetransform.microsoft.gradient(startColorstr='#ff0088cc',endColorstr='#ff0077b3',GradientType=0)}.dropdown-menu .active>a,.dropdown-menu .active>a:hover{color:#fff;text-decoration:none;background-color:#08c;background-color:#0081c2;background-image:linear-gradient(to bottom,#08c,#0077b3);background-image:-moz-linear-gradient(top,#08c,#0077b3);background-image:-webkit-gradient(linear,0 0,0 
100%,from(#08c),to(#0077b3));background-image:-webkit-linear-gradient(top,#08c,#0077b3);background-image:-o-linear-gradient(top,#08c,#0077b3);background-repeat:repeat-x;outline:0;filter:progid:dximagetransform.microsoft.gradient(startColorstr='#ff0088cc',endColorstr='#ff0077b3',GradientType=0)}.dropdown-menu .disabled>a,.dropdown-menu .disabled>a:hover{color:#999}.dropdown-menu .disabled>a:hover{text-decoration:none;cursor:default;background-color:transparent}.open{*z-index:1000}.open>.dropdown-menu{display:block}.pull-right>.dropdown-menu{right:0;left:auto}.dropup .caret,.navbar-fixed-bottom .dropdown .caret{border-top:0;border-bottom:4px solid #000;content:"\2191"}.dropup .dropdown-menu,.navbar-fixed-bottom .dropdown .dropdown-menu{top:auto;bottom:100%;margin-bottom:1px}.dropdown-submenu{position:relative}.dropdown-submenu>.dropdown-menu{top:0;left:100%;margin-top:-6px;margin-left:-1px;-webkit-border-radius:0 6px 6px 6px;-moz-border-radius:0 6px 6px 6px;border-radius:0 6px 6px 6px}.dropdown-submenu:hover .dropdown-menu{display:block}.dropdown-submenu>a:after{display:block;float:right;width:0;height:0;margin-top:5px;margin-right:-10px;border-color:transparent;border-left-color:#ccc;border-style:solid;border-width:5px 0 5px 5px;content:" "}.dropdown-submenu:hover>a:after{border-left-color:#fff}.dropdown .dropdown-menu .nav-header{padding-right:20px;padding-left:20px}.typeahead{margin-top:2px;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}.well{min-height:20px;padding:19px;margin-bottom:20px;background-color:#f5f5f5;border:1px solid #e3e3e3;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.05);-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.05);box-shadow:inset 0 1px 1px rgba(0,0,0,0.05)}.well blockquote{border-color:#ddd;border-color:rgba(0,0,0,0.15)}.well-large{padding:24px;-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px}.well-small{padding:9px;-webkit-border-radius:3px;-moz-border-radius:3px;border-radius:3px}.fade{opacity:0;-webkit-transition:opacity .15s linear;-moz-transition:opacity .15s linear;-o-transition:opacity .15s linear;transition:opacity .15s linear}.fade.in{opacity:1}.collapse{position:relative;height:0;overflow:hidden;overflow:visible \9;-webkit-transition:height .35s ease;-moz-transition:height .35s ease;-o-transition:height .35s ease;transition:height .35s ease}.collapse.in{height:auto}.close{float:right;font-size:20px;font-weight:bold;line-height:20px;color:#000;text-shadow:0 1px 0 #fff;opacity:.2;filter:alpha(opacity=20)}.close:hover{color:#000;text-decoration:none;cursor:pointer;opacity:.4;filter:alpha(opacity=40)}button.close{padding:0;cursor:pointer;background:transparent;border:0;-webkit-appearance:none}.btn{display:inline-block;*display:inline;padding:4px 14px;margin-bottom:0;*margin-left:.3em;font-size:14px;line-height:20px;*line-height:20px;color:#333;text-align:center;text-shadow:0 1px 1px rgba(255,255,255,0.75);vertical-align:middle;cursor:pointer;background-color:#f5f5f5;*background-color:#e6e6e6;background-image:-webkit-gradient(linear,0 0,0 100%,from(#fff),to(#e6e6e6));background-image:-webkit-linear-gradient(top,#fff,#e6e6e6);background-image:-o-linear-gradient(top,#fff,#e6e6e6);background-image:linear-gradient(to bottom,#fff,#e6e6e6);background-image:-moz-linear-gradient(top,#fff,#e6e6e6);background-repeat:repeat-x;border:1px solid #bbb;*border:0;border-color:rgba(0,0,0,0.1) rgba(0,0,0,0.1) rgba(0,0,0,0.25);border-color:#e6e6e6 #e6e6e6 
#bfbfbf;border-bottom-color:#a2a2a2;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px;filter:progid:dximagetransform.microsoft.gradient(startColorstr='#ffffffff',endColorstr='#ffe6e6e6',GradientType=0);filter:progid:dximagetransform.microsoft.gradient(enabled=false);*zoom:1;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,0.2),0 1px 2px rgba(0,0,0,0.05);-moz-box-shadow:inset 0 1px 0 rgba(255,255,255,0.2),0 1px 2px rgba(0,0,0,0.05);box-shadow:inset 0 1px 0 rgba(255,255,255,0.2),0 1px 2px rgba(0,0,0,0.05)}.btn:hover,.btn:active,.btn.active,.btn.disabled,.btn[disabled]{color:#333;background-color:#e6e6e6;*background-color:#d9d9d9}.btn:active,.btn.active{background-color:#ccc \9}.btn:first-child{*margin-left:0}.btn:hover{color:#333;text-decoration:none;background-color:#e6e6e6;*background-color:#d9d9d9;background-position:0 -15px;-webkit-transition:background-position .1s linear;-moz-transition:background-position .1s linear;-o-transition:background-position .1s linear;transition:background-position .1s linear}.btn:focus{outline:thin dotted #333;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}.btn.active,.btn:active{background-color:#e6e6e6;background-color:#d9d9d9 \9;background-image:none;outline:0;-webkit-box-shadow:inset 0 2px 4px rgba(0,0,0,0.15),0 1px 2px rgba(0,0,0,0.05);-moz-box-shadow:inset 0 2px 4px rgba(0,0,0,0.15),0 1px 2px rgba(0,0,0,0.05);box-shadow:inset 0 2px 4px rgba(0,0,0,0.15),0 1px 2px rgba(0,0,0,0.05)}.btn.disabled,.btn[disabled]{cursor:default;background-color:#e6e6e6;background-image:none;opacity:.65;filter:alpha(opacity=65);-webkit-box-shadow:none;-moz-box-shadow:none;box-shadow:none}.btn-large{padding:9px 14px;font-size:16px;line-height:normal;-webkit-border-radius:5px;-moz-border-radius:5px;border-radius:5px}.btn-large [class^="icon-"]{margin-top:2px}.btn-small{padding:3px 9px;font-size:12px;line-height:18px}.btn-small [class^="icon-"]{margin-top:0}.btn-mini{padding:2px 6px;font-size:11px;line-height:16px}.btn-block{display:block;width:100%;padding-right:0;padding-left:0;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.btn-block+.btn-block{margin-top:5px}.btn-primary.active,.btn-warning.active,.btn-danger.active,.btn-success.active,.btn-info.active,.btn-inverse.active{color:rgba(255,255,255,0.75)}.btn{border-color:#c5c5c5;border-color:rgba(0,0,0,0.15) rgba(0,0,0,0.15) rgba(0,0,0,0.25)}.btn-primary{color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.25);background-color:#006dcc;*background-color:#04c;background-image:-webkit-gradient(linear,0 0,0 100%,from(#08c),to(#04c));background-image:-webkit-linear-gradient(top,#08c,#04c);background-image:-o-linear-gradient(top,#08c,#04c);background-image:linear-gradient(to bottom,#08c,#04c);background-image:-moz-linear-gradient(top,#08c,#04c);background-repeat:repeat-x;border-color:#04c #04c #002a80;border-color:rgba(0,0,0,0.1) rgba(0,0,0,0.1) rgba(0,0,0,0.25);filter:progid:dximagetransform.microsoft.gradient(startColorstr='#ff0088cc',endColorstr='#ff0044cc',GradientType=0);filter:progid:dximagetransform.microsoft.gradient(enabled=false)}.btn-primary:hover,.btn-primary:active,.btn-primary.active,.btn-primary.disabled,.btn-primary[disabled]{color:#fff;background-color:#04c;*background-color:#003bb3}.btn-primary:active,.btn-primary.active{background-color:#039 \9}.btn-warning{color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.25);background-color:#faa732;*background-color:#f89406;background-image:-webkit-gradient(linear,0 0,0 
100%,from(#fbb450),to(#f89406));background-image:-webkit-linear-gradient(top,#fbb450,#f89406);background-image:-o-linear-gradient(top,#fbb450,#f89406);background-image:linear-gradient(to bottom,#fbb450,#f89406);background-image:-moz-linear-gradient(top,#fbb450,#f89406);background-repeat:repeat-x;border-color:#f89406 #f89406 #ad6704;border-color:rgba(0,0,0,0.1) rgba(0,0,0,0.1) rgba(0,0,0,0.25);filter:progid:dximagetransform.microsoft.gradient(startColorstr='#fffbb450',endColorstr='#fff89406',GradientType=0);filter:progid:dximagetransform.microsoft.gradient(enabled=false)}.btn-warning:hover,.btn-warning:active,.btn-warning.active,.btn-warning.disabled,.btn-warning[disabled]{color:#fff;background-color:#f89406;*background-color:#df8505}.btn-warning:active,.btn-warning.active{background-color:#c67605 \9}.btn-danger{color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.25);background-color:#da4f49;*background-color:#bd362f;background-image:-webkit-gradient(linear,0 0,0 100%,from(#ee5f5b),to(#bd362f));background-image:-webkit-linear-gradient(top,#ee5f5b,#bd362f);background-image:-o-linear-gradient(top,#ee5f5b,#bd362f);background-image:linear-gradient(to bottom,#ee5f5b,#bd362f);background-image:-moz-linear-gradient(top,#ee5f5b,#bd362f);background-repeat:repeat-x;border-color:#bd362f #bd362f #802420;border-color:rgba(0,0,0,0.1) rgba(0,0,0,0.1) rgba(0,0,0,0.25);filter:progid:dximagetransform.microsoft.gradient(startColorstr='#ffee5f5b',endColorstr='#ffbd362f',GradientType=0);filter:progid:dximagetransform.microsoft.gradient(enabled=false)}.btn-danger:hover,.btn-danger:active,.btn-danger.active,.btn-danger.disabled,.btn-danger[disabled]{color:#fff;background-color:#bd362f;*background-color:#a9302a}.btn-danger:active,.btn-danger.active{background-color:#942a25 \9}.btn-success{color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.25);background-color:#5bb75b;*background-color:#51a351;background-image:-webkit-gradient(linear,0 0,0 100%,from(#62c462),to(#51a351));background-image:-webkit-linear-gradient(top,#62c462,#51a351);background-image:-o-linear-gradient(top,#62c462,#51a351);background-image:linear-gradient(to bottom,#62c462,#51a351);background-image:-moz-linear-gradient(top,#62c462,#51a351);background-repeat:repeat-x;border-color:#51a351 #51a351 #387038;border-color:rgba(0,0,0,0.1) rgba(0,0,0,0.1) rgba(0,0,0,0.25);filter:progid:dximagetransform.microsoft.gradient(startColorstr='#ff62c462',endColorstr='#ff51a351',GradientType=0);filter:progid:dximagetransform.microsoft.gradient(enabled=false)}.btn-success:hover,.btn-success:active,.btn-success.active,.btn-success.disabled,.btn-success[disabled]{color:#fff;background-color:#51a351;*background-color:#499249}.btn-success:active,.btn-success.active{background-color:#408140 \9}.btn-info{color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.25);background-color:#49afcd;*background-color:#2f96b4;background-image:-webkit-gradient(linear,0 0,0 100%,from(#5bc0de),to(#2f96b4));background-image:-webkit-linear-gradient(top,#5bc0de,#2f96b4);background-image:-o-linear-gradient(top,#5bc0de,#2f96b4);background-image:linear-gradient(to bottom,#5bc0de,#2f96b4);background-image:-moz-linear-gradient(top,#5bc0de,#2f96b4);background-repeat:repeat-x;border-color:#2f96b4 #2f96b4 #1f6377;border-color:rgba(0,0,0,0.1) rgba(0,0,0,0.1) 
rgba(0,0,0,0.25);filter:progid:dximagetransform.microsoft.gradient(startColorstr='#ff5bc0de',endColorstr='#ff2f96b4',GradientType=0);filter:progid:dximagetransform.microsoft.gradient(enabled=false)}.btn-info:hover,.btn-info:active,.btn-info.active,.btn-info.disabled,.btn-info[disabled]{color:#fff;background-color:#2f96b4;*background-color:#2a85a0}.btn-info:active,.btn-info.active{background-color:#24748c \9}.btn-inverse{color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.25);background-color:#363636;*background-color:#222;background-image:-webkit-gradient(linear,0 0,0 100%,from(#444),to(#222));background-image:-webkit-linear-gradient(top,#444,#222);background-image:-o-linear-gradient(top,#444,#222);background-image:linear-gradient(to bottom,#444,#222);background-image:-moz-linear-gradient(top,#444,#222);background-repeat:repeat-x;border-color:#222 #222 #000;border-color:rgba(0,0,0,0.1) rgba(0,0,0,0.1) rgba(0,0,0,0.25);filter:progid:dximagetransform.microsoft.gradient(startColorstr='#ff444444',endColorstr='#ff222222',GradientType=0);filter:progid:dximagetransform.microsoft.gradient(enabled=false)}.btn-inverse:hover,.btn-inverse:active,.btn-inverse.active,.btn-inverse.disabled,.btn-inverse[disabled]{color:#fff;background-color:#222;*background-color:#151515}.btn-inverse:active,.btn-inverse.active{background-color:#080808 \9}button.btn,input[type="submit"].btn{*padding-top:3px;*padding-bottom:3px}button.btn::-moz-focus-inner,input[type="submit"].btn::-moz-focus-inner{padding:0;border:0}button.btn.btn-large,input[type="submit"].btn.btn-large{*padding-top:7px;*padding-bottom:7px}button.btn.btn-small,input[type="submit"].btn.btn-small{*padding-top:3px;*padding-bottom:3px}button.btn.btn-mini,input[type="submit"].btn.btn-mini{*padding-top:1px;*padding-bottom:1px}.btn-link,.btn-link:active{background-color:transparent;background-image:none;-webkit-box-shadow:none;-moz-box-shadow:none;box-shadow:none}.btn-link{color:#08c;cursor:pointer;border-color:transparent;-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.btn-link:hover{color:#005580;text-decoration:underline;background-color:transparent}.btn-group{position:relative;*margin-left:.3em;font-size:0;white-space:nowrap}.btn-group:first-child{*margin-left:0}.btn-group+.btn-group{margin-left:5px}.btn-toolbar{margin-top:10px;margin-bottom:10px;font-size:0}.btn-toolbar .btn-group{display:inline-block;*display:inline;*zoom:1}.btn-toolbar .btn+.btn,.btn-toolbar .btn-group+.btn,.btn-toolbar 
.btn+.btn-group{margin-left:5px}.btn-group>.btn{position:relative;-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.btn-group>.btn+.btn{margin-left:-1px}.btn-group>.btn,.btn-group>.dropdown-menu{font-size:14px}.btn-group>.btn-mini{font-size:11px}.btn-group>.btn-small{font-size:12px}.btn-group>.btn-large{font-size:16px}.btn-group>.btn:first-child{margin-left:0;-webkit-border-bottom-left-radius:4px;border-bottom-left-radius:4px;-webkit-border-top-left-radius:4px;border-top-left-radius:4px;-moz-border-radius-bottomleft:4px;-moz-border-radius-topleft:4px}.btn-group>.btn:last-child,.btn-group>.dropdown-toggle{-webkit-border-top-right-radius:4px;border-top-right-radius:4px;-webkit-border-bottom-right-radius:4px;border-bottom-right-radius:4px;-moz-border-radius-topright:4px;-moz-border-radius-bottomright:4px}.btn-group>.btn.large:first-child{margin-left:0;-webkit-border-bottom-left-radius:6px;border-bottom-left-radius:6px;-webkit-border-top-left-radius:6px;border-top-left-radius:6px;-moz-border-radius-bottomleft:6px;-moz-border-radius-topleft:6px}.btn-group>.btn.large:last-child,.btn-group>.large.dropdown-toggle{-webkit-border-top-right-radius:6px;border-top-right-radius:6px;-webkit-border-bottom-right-radius:6px;border-bottom-right-radius:6px;-moz-border-radius-topright:6px;-moz-border-radius-bottomright:6px}.btn-group>.btn:hover,.btn-group>.btn:focus,.btn-group>.btn:active,.btn-group>.btn.active{z-index:2}.btn-group .dropdown-toggle:active,.btn-group.open .dropdown-toggle{outline:0}.btn-group>.btn+.dropdown-toggle{*padding-top:5px;padding-right:8px;*padding-bottom:5px;padding-left:8px;-webkit-box-shadow:inset 1px 0 0 rgba(255,255,255,0.125),inset 0 1px 0 rgba(255,255,255,0.2),0 1px 2px rgba(0,0,0,0.05);-moz-box-shadow:inset 1px 0 0 rgba(255,255,255,0.125),inset 0 1px 0 rgba(255,255,255,0.2),0 1px 2px rgba(0,0,0,0.05);box-shadow:inset 1px 0 0 rgba(255,255,255,0.125),inset 0 1px 0 rgba(255,255,255,0.2),0 1px 2px rgba(0,0,0,0.05)}.btn-group>.btn-mini+.dropdown-toggle{*padding-top:2px;padding-right:5px;*padding-bottom:2px;padding-left:5px}.btn-group>.btn-small+.dropdown-toggle{*padding-top:5px;*padding-bottom:4px}.btn-group>.btn-large+.dropdown-toggle{*padding-top:7px;padding-right:12px;*padding-bottom:7px;padding-left:12px}.btn-group.open .dropdown-toggle{background-image:none;-webkit-box-shadow:inset 0 2px 4px rgba(0,0,0,0.15),0 1px 2px rgba(0,0,0,0.05);-moz-box-shadow:inset 0 2px 4px rgba(0,0,0,0.15),0 1px 2px rgba(0,0,0,0.05);box-shadow:inset 0 2px 4px rgba(0,0,0,0.15),0 1px 2px rgba(0,0,0,0.05)}.btn-group.open .btn.dropdown-toggle{background-color:#e6e6e6}.btn-group.open .btn-primary.dropdown-toggle{background-color:#04c}.btn-group.open .btn-warning.dropdown-toggle{background-color:#f89406}.btn-group.open .btn-danger.dropdown-toggle{background-color:#bd362f}.btn-group.open .btn-success.dropdown-toggle{background-color:#51a351}.btn-group.open .btn-info.dropdown-toggle{background-color:#2f96b4}.btn-group.open .btn-inverse.dropdown-toggle{background-color:#222}.btn .caret{margin-top:8px;margin-left:0}.btn-mini .caret,.btn-small .caret,.btn-large .caret{margin-top:6px}.btn-large .caret{border-top-width:5px;border-right-width:5px;border-left-width:5px}.dropup .btn-large .caret{border-top:0;border-bottom:5px solid #000}.btn-primary .caret,.btn-warning .caret,.btn-danger .caret,.btn-info .caret,.btn-success .caret,.btn-inverse .caret{border-top-color:#fff;border-bottom-color:#fff}.btn-group-vertical{display:inline-block;*display:inline;*zoom:1}.btn-group-vertical 
.btn{display:block;float:none;width:100%;-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.btn-group-vertical .btn+.btn{margin-top:-1px;margin-left:0}.btn-group-vertical .btn:first-child{-webkit-border-radius:4px 4px 0 0;-moz-border-radius:4px 4px 0 0;border-radius:4px 4px 0 0}.btn-group-vertical .btn:last-child{-webkit-border-radius:0 0 4px 4px;-moz-border-radius:0 0 4px 4px;border-radius:0 0 4px 4px}.btn-group-vertical .btn-large:first-child{-webkit-border-radius:6px 6px 0 0;-moz-border-radius:6px 6px 0 0;border-radius:6px 6px 0 0}.btn-group-vertical .btn-large:last-child{-webkit-border-radius:0 0 6px 6px;-moz-border-radius:0 0 6px 6px;border-radius:0 0 6px 6px}.alert{padding:8px 35px 8px 14px;margin-bottom:20px;color:#c09853;text-shadow:0 1px 0 rgba(255,255,255,0.5);background-color:#fcf8e3;border:1px solid #fbeed5;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}.alert h4{margin:0}.alert .close{position:relative;top:-2px;right:-21px;line-height:20px}.alert-success{color:#468847;background-color:#dff0d8;border-color:#d6e9c6}.alert-danger,.alert-error{color:#b94a48;background-color:#f2dede;border-color:#eed3d7}.alert-info{color:#3a87ad;background-color:#d9edf7;border-color:#bce8f1}.alert-block{padding-top:14px;padding-bottom:14px}.alert-block>p,.alert-block>ul{margin-bottom:0}.alert-block p+p{margin-top:5px}.nav{margin-bottom:20px;margin-left:0;list-style:none}.nav>li>a{display:block}.nav>li>a:hover{text-decoration:none;background-color:#eee}.nav>.pull-right{float:right}.nav-header{display:block;padding:3px 15px;font-size:11px;font-weight:bold;line-height:20px;color:#999;text-shadow:0 1px 0 rgba(255,255,255,0.5);text-transform:uppercase}.nav li+.nav-header{margin-top:9px}.nav-list{padding-right:15px;padding-left:15px;margin-bottom:0}.nav-list>li>a,.nav-list .nav-header{margin-right:-15px;margin-left:-15px;text-shadow:0 1px 0 rgba(255,255,255,0.5)}.nav-list>li>a{padding:3px 15px}.nav-list>.active>a,.nav-list>.active>a:hover{color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.2);background-color:#08c}.nav-list [class^="icon-"]{margin-right:2px}.nav-list .divider{*width:100%;height:1px;margin:9px 1px;*margin:-5px 0 5px;overflow:hidden;background-color:#e5e5e5;border-bottom:1px solid #fff}.nav-tabs,.nav-pills{*zoom:1}.nav-tabs:before,.nav-pills:before,.nav-tabs:after,.nav-pills:after{display:table;line-height:0;content:""}.nav-tabs:after,.nav-pills:after{clear:both}.nav-tabs>li,.nav-pills>li{float:left}.nav-tabs>li>a,.nav-pills>li>a{padding-right:12px;padding-left:12px;margin-right:2px;line-height:14px}.nav-tabs{border-bottom:1px solid #ddd}.nav-tabs>li{margin-bottom:-1px}.nav-tabs>li>a{padding-top:8px;padding-bottom:8px;line-height:20px;border:1px solid transparent;-webkit-border-radius:4px 4px 0 0;-moz-border-radius:4px 4px 0 0;border-radius:4px 4px 0 0}.nav-tabs>li>a:hover{border-color:#eee #eee #ddd}.nav-tabs>.active>a,.nav-tabs>.active>a:hover{color:#555;cursor:default;background-color:#fff;border:1px solid #ddd;border-bottom-color:transparent}.nav-pills>li>a{padding-top:8px;padding-bottom:8px;margin-top:2px;margin-bottom:2px;-webkit-border-radius:5px;-moz-border-radius:5px;border-radius:5px}.nav-pills>.active>a,.nav-pills>.active>a:hover{color:#fff;background-color:#08c}.nav-stacked>li{float:none}.nav-stacked>li>a{margin-right:0}.nav-tabs.nav-stacked{border-bottom:0}.nav-tabs.nav-stacked>li>a{border:1px solid 
#ddd;-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.nav-tabs.nav-stacked>li:first-child>a{-webkit-border-top-right-radius:4px;border-top-right-radius:4px;-webkit-border-top-left-radius:4px;border-top-left-radius:4px;-moz-border-radius-topright:4px;-moz-border-radius-topleft:4px}.nav-tabs.nav-stacked>li:last-child>a{-webkit-border-bottom-right-radius:4px;border-bottom-right-radius:4px;-webkit-border-bottom-left-radius:4px;border-bottom-left-radius:4px;-moz-border-radius-bottomright:4px;-moz-border-radius-bottomleft:4px}.nav-tabs.nav-stacked>li>a:hover{z-index:2;border-color:#ddd}.nav-pills.nav-stacked>li>a{margin-bottom:3px}.nav-pills.nav-stacked>li:last-child>a{margin-bottom:1px}.nav-tabs .dropdown-menu{-webkit-border-radius:0 0 6px 6px;-moz-border-radius:0 0 6px 6px;border-radius:0 0 6px 6px}.nav-pills .dropdown-menu{-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px}.nav .dropdown-toggle .caret{margin-top:6px;border-top-color:#08c;border-bottom-color:#08c}.nav .dropdown-toggle:hover .caret{border-top-color:#005580;border-bottom-color:#005580}.nav-tabs .dropdown-toggle .caret{margin-top:8px}.nav .active .dropdown-toggle .caret{border-top-color:#fff;border-bottom-color:#fff}.nav-tabs .active .dropdown-toggle .caret{border-top-color:#555;border-bottom-color:#555}.nav>.dropdown.active>a:hover{cursor:pointer}.nav-tabs .open .dropdown-toggle,.nav-pills .open .dropdown-toggle,.nav>li.dropdown.open.active>a:hover{color:#fff;background-color:#999;border-color:#999}.nav li.dropdown.open .caret,.nav li.dropdown.open.active .caret,.nav li.dropdown.open a:hover .caret{border-top-color:#fff;border-bottom-color:#fff;opacity:1;filter:alpha(opacity=100)}.tabs-stacked .open>a:hover{border-color:#999}.tabbable{*zoom:1}.tabbable:before,.tabbable:after{display:table;line-height:0;content:""}.tabbable:after{clear:both}.tab-content{overflow:auto}.tabs-below>.nav-tabs,.tabs-right>.nav-tabs,.tabs-left>.nav-tabs{border-bottom:0}.tab-content>.tab-pane,.pill-content>.pill-pane{display:none}.tab-content>.active,.pill-content>.active{display:block}.tabs-below>.nav-tabs{border-top:1px solid #ddd}.tabs-below>.nav-tabs>li{margin-top:-1px;margin-bottom:0}.tabs-below>.nav-tabs>li>a{-webkit-border-radius:0 0 4px 4px;-moz-border-radius:0 0 4px 4px;border-radius:0 0 4px 4px}.tabs-below>.nav-tabs>li>a:hover{border-top-color:#ddd;border-bottom-color:transparent}.tabs-below>.nav-tabs>.active>a,.tabs-below>.nav-tabs>.active>a:hover{border-color:transparent #ddd #ddd #ddd}.tabs-left>.nav-tabs>li,.tabs-right>.nav-tabs>li{float:none}.tabs-left>.nav-tabs>li>a,.tabs-right>.nav-tabs>li>a{min-width:74px;margin-right:0;margin-bottom:3px}.tabs-left>.nav-tabs{float:left;margin-right:19px;border-right:1px solid #ddd}.tabs-left>.nav-tabs>li>a{margin-right:-1px;-webkit-border-radius:4px 0 0 4px;-moz-border-radius:4px 0 0 4px;border-radius:4px 0 0 4px}.tabs-left>.nav-tabs>li>a:hover{border-color:#eee #ddd #eee #eee}.tabs-left>.nav-tabs .active>a,.tabs-left>.nav-tabs .active>a:hover{border-color:#ddd transparent #ddd #ddd;*border-right-color:#fff}.tabs-right>.nav-tabs{float:right;margin-left:19px;border-left:1px solid #ddd}.tabs-right>.nav-tabs>li>a{margin-left:-1px;-webkit-border-radius:0 4px 4px 0;-moz-border-radius:0 4px 4px 0;border-radius:0 4px 4px 0}.tabs-right>.nav-tabs>li>a:hover{border-color:#eee #eee #eee #ddd}.tabs-right>.nav-tabs .active>a,.tabs-right>.nav-tabs .active>a:hover{border-color:#ddd #ddd #ddd 
transparent;*border-left-color:#fff}.nav>.disabled>a{color:#999}.nav>.disabled>a:hover{text-decoration:none;cursor:default;background-color:transparent}.navbar{*position:relative;*z-index:2;margin-bottom:20px;overflow:visible;color:#555}.navbar-inner{min-height:40px;padding-right:20px;padding-left:20px;background-color:#fafafa;background-image:-moz-linear-gradient(top,#fff,#f2f2f2);background-image:-webkit-gradient(linear,0 0,0 100%,from(#fff),to(#f2f2f2));background-image:-webkit-linear-gradient(top,#fff,#f2f2f2);background-image:-o-linear-gradient(top,#fff,#f2f2f2);background-image:linear-gradient(to bottom,#fff,#f2f2f2);background-repeat:repeat-x;border:1px solid #d4d4d4;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px;filter:progid:dximagetransform.microsoft.gradient(startColorstr='#ffffffff',endColorstr='#fff2f2f2',GradientType=0);-webkit-box-shadow:0 1px 4px rgba(0,0,0,0.065);-moz-box-shadow:0 1px 4px rgba(0,0,0,0.065);box-shadow:0 1px 4px rgba(0,0,0,0.065)}.navbar .container{width:auto}.nav-collapse.collapse{height:auto}.navbar .brand{display:block;float:left;padding:10px 20px 10px;margin-left:-20px;font-size:20px;font-weight:200;color:#555;text-shadow:0 1px 0 #fff}.navbar .brand:hover{text-decoration:none}.navbar-text{margin-bottom:0;line-height:40px}.navbar-link{color:#555}.navbar-link:hover{color:#333}.navbar .divider-vertical{height:40px;margin:0 9px;border-right:1px solid #fff;border-left:1px solid #f2f2f2}.navbar .btn,.navbar .btn-group{margin-top:6px}.navbar .btn-group .btn{margin:0}.navbar-form{margin-bottom:0;*zoom:1}.navbar-form:before,.navbar-form:after{display:table;line-height:0;content:""}.navbar-form:after{clear:both}.navbar-form input,.navbar-form select,.navbar-form .radio,.navbar-form .checkbox{margin-top:5px}.navbar-form input,.navbar-form select,.navbar-form .btn{display:inline-block;margin-bottom:0}.navbar-form input[type="image"],.navbar-form input[type="checkbox"],.navbar-form input[type="radio"]{margin-top:3px}.navbar-form .input-append,.navbar-form .input-prepend{margin-top:6px;white-space:nowrap}.navbar-form .input-append input,.navbar-form .input-prepend input{margin-top:0}.navbar-search{position:relative;float:left;margin-top:5px;margin-bottom:0}.navbar-search .search-query{padding:4px 14px;margin-bottom:0;font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:13px;font-weight:normal;line-height:1;-webkit-border-radius:15px;-moz-border-radius:15px;border-radius:15px}.navbar-static-top{position:static;width:100%;margin-bottom:0}.navbar-static-top .navbar-inner{-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.navbar-fixed-top,.navbar-fixed-bottom{position:fixed;right:0;left:0;z-index:1030;margin-bottom:0}.navbar-fixed-top .navbar-inner,.navbar-fixed-bottom .navbar-inner,.navbar-static-top .navbar-inner{border:0}.navbar-fixed-top .navbar-inner,.navbar-fixed-bottom .navbar-inner{padding-right:0;padding-left:0;-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.navbar-static-top .container,.navbar-fixed-top .container,.navbar-fixed-bottom .container{width:940px}.navbar-fixed-top{top:0}.navbar-fixed-top .navbar-inner,.navbar-static-top .navbar-inner{-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,0.1),0 1px 10px rgba(0,0,0,0.1);-moz-box-shadow:inset 0 -1px 0 rgba(0,0,0,0.1),0 1px 10px rgba(0,0,0,0.1);box-shadow:inset 0 -1px 0 rgba(0,0,0,0.1),0 1px 10px rgba(0,0,0,0.1)}.navbar-fixed-bottom{bottom:0}.navbar-fixed-bottom .navbar-inner{-webkit-box-shadow:inset 0 1px 0 rgba(0,0,0,0.1),0 -1px 10px 
rgba(0,0,0,0.1);-moz-box-shadow:inset 0 1px 0 rgba(0,0,0,0.1),0 -1px 10px rgba(0,0,0,0.1);box-shadow:inset 0 1px 0 rgba(0,0,0,0.1),0 -1px 10px rgba(0,0,0,0.1)}.navbar .nav{position:relative;left:0;display:block;float:left;margin:0 10px 0 0}.navbar .nav.pull-right{float:right}.navbar .nav>li{float:left}.navbar .nav>li>a{float:none;padding:10px 15px 10px;color:#555;text-decoration:none;text-shadow:0 1px 0 #fff}.navbar .nav .dropdown-toggle .caret{margin-top:8px}.navbar .nav>li>a:focus,.navbar .nav>li>a:hover{color:#333;text-decoration:none;background-color:transparent}.navbar .nav>.active>a,.navbar .nav>.active>a:hover,.navbar .nav>.active>a:focus{color:#555;text-decoration:none;background-color:#e5e5e5;-webkit-box-shadow:inset 0 3px 8px rgba(0,0,0,0.125);-moz-box-shadow:inset 0 3px 8px rgba(0,0,0,0.125);box-shadow:inset 0 3px 8px rgba(0,0,0,0.125)}.navbar .btn-navbar{display:none;float:right;padding:7px 10px;margin-right:5px;margin-left:5px;color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.25);background-color:#ededed;*background-color:#e5e5e5;background-image:-webkit-gradient(linear,0 0,0 100%,from(#f2f2f2),to(#e5e5e5));background-image:-webkit-linear-gradient(top,#f2f2f2,#e5e5e5);background-image:-o-linear-gradient(top,#f2f2f2,#e5e5e5);background-image:linear-gradient(to bottom,#f2f2f2,#e5e5e5);background-image:-moz-linear-gradient(top,#f2f2f2,#e5e5e5);background-repeat:repeat-x;border-color:#e5e5e5 #e5e5e5 #bfbfbf;border-color:rgba(0,0,0,0.1) rgba(0,0,0,0.1) rgba(0,0,0,0.25);filter:progid:dximagetransform.microsoft.gradient(startColorstr='#fff2f2f2',endColorstr='#ffe5e5e5',GradientType=0);filter:progid:dximagetransform.microsoft.gradient(enabled=false);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,0.1),0 1px 0 rgba(255,255,255,0.075);-moz-box-shadow:inset 0 1px 0 rgba(255,255,255,0.1),0 1px 0 rgba(255,255,255,0.075);box-shadow:inset 0 1px 0 rgba(255,255,255,0.1),0 1px 0 rgba(255,255,255,0.075)}.navbar .btn-navbar:hover,.navbar .btn-navbar:active,.navbar .btn-navbar.active,.navbar .btn-navbar.disabled,.navbar .btn-navbar[disabled]{color:#fff;background-color:#e5e5e5;*background-color:#d9d9d9}.navbar .btn-navbar:active,.navbar .btn-navbar.active{background-color:#ccc \9}.navbar .btn-navbar .icon-bar{display:block;width:18px;height:2px;background-color:#f5f5f5;-webkit-border-radius:1px;-moz-border-radius:1px;border-radius:1px;-webkit-box-shadow:0 1px 0 rgba(0,0,0,0.25);-moz-box-shadow:0 1px 0 rgba(0,0,0,0.25);box-shadow:0 1px 0 rgba(0,0,0,0.25)}.btn-navbar .icon-bar+.icon-bar{margin-top:3px}.navbar .nav>li>.dropdown-menu:before{position:absolute;top:-7px;left:9px;display:inline-block;border-right:7px solid transparent;border-bottom:7px solid #ccc;border-left:7px solid transparent;border-bottom-color:rgba(0,0,0,0.2);content:''}.navbar .nav>li>.dropdown-menu:after{position:absolute;top:-6px;left:10px;display:inline-block;border-right:6px solid transparent;border-bottom:6px solid #fff;border-left:6px solid transparent;content:''}.navbar-fixed-bottom .nav>li>.dropdown-menu:before{top:auto;bottom:-7px;border-top:7px solid #ccc;border-bottom:0;border-top-color:rgba(0,0,0,0.2)}.navbar-fixed-bottom .nav>li>.dropdown-menu:after{top:auto;bottom:-6px;border-top:6px solid #fff;border-bottom:0}.navbar .nav li.dropdown.open>.dropdown-toggle,.navbar .nav li.dropdown.active>.dropdown-toggle,.navbar .nav li.dropdown.open.active>.dropdown-toggle{color:#555;background-color:#e5e5e5}.navbar .nav li.dropdown>.dropdown-toggle .caret{border-top-color:#555;border-bottom-color:#555}.navbar .nav 
li.dropdown.open>.dropdown-toggle .caret,.navbar .nav li.dropdown.active>.dropdown-toggle .caret,.navbar .nav li.dropdown.open.active>.dropdown-toggle .caret{border-top-color:#555;border-bottom-color:#555}.navbar .pull-right>li>.dropdown-menu,.navbar .nav>li>.dropdown-menu.pull-right{right:0;left:auto}.navbar .pull-right>li>.dropdown-menu:before,.navbar .nav>li>.dropdown-menu.pull-right:before{right:12px;left:auto}.navbar .pull-right>li>.dropdown-menu:after,.navbar .nav>li>.dropdown-menu.pull-right:after{right:13px;left:auto}.navbar .pull-right>li>.dropdown-menu .dropdown-menu,.navbar .nav>li>.dropdown-menu.pull-right .dropdown-menu{right:100%;left:auto;margin-right:-1px;margin-left:0;-webkit-border-radius:6px 0 6px 6px;-moz-border-radius:6px 0 6px 6px;border-radius:6px 0 6px 6px}.navbar-inverse{color:#999}.navbar-inverse .navbar-inner{background-color:#1b1b1b;background-image:-moz-linear-gradient(top,#222,#111);background-image:-webkit-gradient(linear,0 0,0 100%,from(#222),to(#111));background-image:-webkit-linear-gradient(top,#222,#111);background-image:-o-linear-gradient(top,#222,#111);background-image:linear-gradient(to bottom,#222,#111);background-repeat:repeat-x;border-color:#252525;filter:progid:dximagetransform.microsoft.gradient(startColorstr='#ff222222',endColorstr='#ff111111',GradientType=0)}.navbar-inverse .brand,.navbar-inverse .nav>li>a{color:#999;text-shadow:0 -1px 0 rgba(0,0,0,0.25)}.navbar-inverse .brand:hover,.navbar-inverse .nav>li>a:hover{color:#fff}.navbar-inverse .nav>li>a:focus,.navbar-inverse .nav>li>a:hover{color:#fff;background-color:transparent}.navbar-inverse .nav .active>a,.navbar-inverse .nav .active>a:hover,.navbar-inverse .nav .active>a:focus{color:#fff;background-color:#111}.navbar-inverse .navbar-link{color:#999}.navbar-inverse .navbar-link:hover{color:#fff}.navbar-inverse .divider-vertical{border-right-color:#222;border-left-color:#111}.navbar-inverse .nav li.dropdown.open>.dropdown-toggle,.navbar-inverse .nav li.dropdown.active>.dropdown-toggle,.navbar-inverse .nav li.dropdown.open.active>.dropdown-toggle{color:#fff;background-color:#111}.navbar-inverse .nav li.dropdown>.dropdown-toggle .caret{border-top-color:#999;border-bottom-color:#999}.navbar-inverse .nav li.dropdown.open>.dropdown-toggle .caret,.navbar-inverse .nav li.dropdown.active>.dropdown-toggle .caret,.navbar-inverse .nav li.dropdown.open.active>.dropdown-toggle .caret{border-top-color:#fff;border-bottom-color:#fff}.navbar-inverse .navbar-search .search-query{color:#fff;background-color:#515151;border-color:#111;-webkit-box-shadow:inset 0 1px 2px rgba(0,0,0,0.1),0 1px 0 rgba(255,255,255,0.15);-moz-box-shadow:inset 0 1px 2px rgba(0,0,0,0.1),0 1px 0 rgba(255,255,255,0.15);box-shadow:inset 0 1px 2px rgba(0,0,0,0.1),0 1px 0 rgba(255,255,255,0.15);-webkit-transition:none;-moz-transition:none;-o-transition:none;transition:none}.navbar-inverse .navbar-search .search-query:-moz-placeholder{color:#ccc}.navbar-inverse .navbar-search .search-query:-ms-input-placeholder{color:#ccc}.navbar-inverse .navbar-search .search-query::-webkit-input-placeholder{color:#ccc}.navbar-inverse .navbar-search .search-query:focus,.navbar-inverse .navbar-search .search-query.focused{padding:5px 15px;color:#333;text-shadow:0 1px 0 #fff;background-color:#fff;border:0;outline:0;-webkit-box-shadow:0 0 3px rgba(0,0,0,0.15);-moz-box-shadow:0 0 3px rgba(0,0,0,0.15);box-shadow:0 0 3px rgba(0,0,0,0.15)}.navbar-inverse .btn-navbar{color:#fff;text-shadow:0 -1px 0 
rgba(0,0,0,0.25);background-color:#0e0e0e;*background-color:#040404;background-image:-webkit-gradient(linear,0 0,0 100%,from(#151515),to(#040404));background-image:-webkit-linear-gradient(top,#151515,#040404);background-image:-o-linear-gradient(top,#151515,#040404);background-image:linear-gradient(to bottom,#151515,#040404);background-image:-moz-linear-gradient(top,#151515,#040404);background-repeat:repeat-x;border-color:#040404 #040404 #000;border-color:rgba(0,0,0,0.1) rgba(0,0,0,0.1) rgba(0,0,0,0.25);filter:progid:dximagetransform.microsoft.gradient(startColorstr='#ff151515',endColorstr='#ff040404',GradientType=0);filter:progid:dximagetransform.microsoft.gradient(enabled=false)}.navbar-inverse .btn-navbar:hover,.navbar-inverse .btn-navbar:active,.navbar-inverse .btn-navbar.active,.navbar-inverse .btn-navbar.disabled,.navbar-inverse .btn-navbar[disabled]{color:#fff;background-color:#040404;*background-color:#000}.navbar-inverse .btn-navbar:active,.navbar-inverse .btn-navbar.active{background-color:#000 \9}.breadcrumb{padding:8px 15px;margin:0 0 20px;list-style:none;background-color:#f5f5f5;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}.breadcrumb li{display:inline-block;*display:inline;text-shadow:0 1px 0 #fff;*zoom:1}.breadcrumb .divider{padding:0 5px;color:#ccc}.breadcrumb .active{color:#999}.pagination{height:40px;margin:20px 0}.pagination ul{display:inline-block;*display:inline;margin-bottom:0;margin-left:0;-webkit-border-radius:3px;-moz-border-radius:3px;border-radius:3px;*zoom:1;-webkit-box-shadow:0 1px 2px rgba(0,0,0,0.05);-moz-box-shadow:0 1px 2px rgba(0,0,0,0.05);box-shadow:0 1px 2px rgba(0,0,0,0.05)}.pagination li{display:inline}.pagination a,.pagination span{float:left;padding:0 14px;line-height:38px;text-decoration:none;background-color:#fff;border:1px solid #ddd;border-left-width:0}.pagination a:hover,.pagination .active a,.pagination .active span{background-color:#f5f5f5}.pagination .active a,.pagination .active span{color:#999;cursor:default}.pagination .disabled span,.pagination .disabled a,.pagination .disabled a:hover{color:#999;cursor:default;background-color:transparent}.pagination li:first-child a,.pagination li:first-child span{border-left-width:1px;-webkit-border-radius:3px 0 0 3px;-moz-border-radius:3px 0 0 3px;border-radius:3px 0 0 3px}.pagination li:last-child a,.pagination li:last-child span{-webkit-border-radius:0 3px 3px 0;-moz-border-radius:0 3px 3px 0;border-radius:0 3px 3px 0}.pagination-centered{text-align:center}.pagination-right{text-align:right}.pager{margin:20px 0;text-align:center;list-style:none;*zoom:1}.pager:before,.pager:after{display:table;line-height:0;content:""}.pager:after{clear:both}.pager li{display:inline}.pager a{display:inline-block;padding:5px 14px;background-color:#fff;border:1px solid #ddd;-webkit-border-radius:15px;-moz-border-radius:15px;border-radius:15px}.pager a:hover{text-decoration:none;background-color:#f5f5f5}.pager .next a{float:right}.pager .previous a{float:left}.pager .disabled a,.pager .disabled a:hover{color:#999;cursor:default;background-color:#fff}.modal-open .dropdown-menu{z-index:2050}.modal-open .dropdown.open{*z-index:2050}.modal-open .popover{z-index:2060}.modal-open .tooltip{z-index:2080}.modal-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1040;background-color:#000}.modal-backdrop.fade{opacity:0}.modal-backdrop,.modal-backdrop.fade.in{opacity:.8;filter:alpha(opacity=80)}.modal{position:fixed;top:50%;left:50%;z-index:1050;width:560px;margin:-250px 0 0 
-280px;overflow:auto;background-color:#fff;border:1px solid #999;border:1px solid rgba(0,0,0,0.3);*border:1px solid #999;-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px;-webkit-box-shadow:0 3px 7px rgba(0,0,0,0.3);-moz-box-shadow:0 3px 7px rgba(0,0,0,0.3);box-shadow:0 3px 7px rgba(0,0,0,0.3);-webkit-background-clip:padding-box;-moz-background-clip:padding-box;background-clip:padding-box}.modal.fade{top:-25%;-webkit-transition:opacity .3s linear,top .3s ease-out;-moz-transition:opacity .3s linear,top .3s ease-out;-o-transition:opacity .3s linear,top .3s ease-out;transition:opacity .3s linear,top .3s ease-out}.modal.fade.in{top:50%}.modal-header{padding:9px 15px;border-bottom:1px solid #eee}.modal-header .close{margin-top:2px}.modal-header h3{margin:0;line-height:30px}.modal-body{max-height:400px;padding:15px;overflow-y:auto}.modal-form{margin-bottom:0}.modal-footer{padding:14px 15px 15px;margin-bottom:0;text-align:right;background-color:#f5f5f5;border-top:1px solid #ddd;-webkit-border-radius:0 0 6px 6px;-moz-border-radius:0 0 6px 6px;border-radius:0 0 6px 6px;*zoom:1;-webkit-box-shadow:inset 0 1px 0 #fff;-moz-box-shadow:inset 0 1px 0 #fff;box-shadow:inset 0 1px 0 #fff}.modal-footer:before,.modal-footer:after{display:table;line-height:0;content:""}.modal-footer:after{clear:both}.modal-footer .btn+.btn{margin-bottom:0;margin-left:5px}.modal-footer .btn-group .btn+.btn{margin-left:-1px}.tooltip{position:absolute;z-index:1030;display:block;padding:5px;font-size:11px;opacity:0;filter:alpha(opacity=0);visibility:visible}.tooltip.in{opacity:.8;filter:alpha(opacity=80)}.tooltip.top{margin-top:-3px}.tooltip.right{margin-left:3px}.tooltip.bottom{margin-top:3px}.tooltip.left{margin-left:-3px}.tooltip-inner{max-width:200px;padding:3px 8px;color:#fff;text-align:center;text-decoration:none;background-color:#000;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}.tooltip-arrow{position:absolute;width:0;height:0;border-color:transparent;border-style:solid}.tooltip.top .tooltip-arrow{bottom:0;left:50%;margin-left:-5px;border-top-color:#000;border-width:5px 5px 0}.tooltip.right .tooltip-arrow{top:50%;left:0;margin-top:-5px;border-right-color:#000;border-width:5px 5px 5px 0}.tooltip.left .tooltip-arrow{top:50%;right:0;margin-top:-5px;border-left-color:#000;border-width:5px 0 5px 5px}.tooltip.bottom .tooltip-arrow{top:0;left:50%;margin-left:-5px;border-bottom-color:#000;border-width:0 5px 5px}.popover{position:absolute;top:0;left:0;z-index:1010;display:none;width:236px;padding:1px;background-color:#fff;border:1px solid #ccc;border:1px solid rgba(0,0,0,0.2);-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px;-webkit-box-shadow:0 5px 10px rgba(0,0,0,0.2);-moz-box-shadow:0 5px 10px rgba(0,0,0,0.2);box-shadow:0 5px 10px rgba(0,0,0,0.2);-webkit-background-clip:padding-box;-moz-background-clip:padding;background-clip:padding-box}.popover.top{margin-bottom:10px}.popover.right{margin-left:10px}.popover.bottom{margin-top:10px}.popover.left{margin-right:10px}.popover-title{padding:8px 14px;margin:0;font-size:14px;font-weight:normal;line-height:18px;background-color:#f7f7f7;border-bottom:1px solid #ebebeb;-webkit-border-radius:5px 5px 0 0;-moz-border-radius:5px 5px 0 0;border-radius:5px 5px 0 0}.popover-content{padding:9px 14px}.popover-content p,.popover-content ul,.popover-content ol{margin-bottom:0}.popover .arrow,.popover .arrow:after{position:absolute;display:inline-block;width:0;height:0;border-color:transparent;border-style:solid}.popover 
.arrow:after{z-index:-1;content:""}.popover.top .arrow{bottom:-10px;left:50%;margin-left:-10px;border-top-color:#fff;border-width:10px 10px 0}.popover.top .arrow:after{bottom:-1px;left:-11px;border-top-color:rgba(0,0,0,0.25);border-width:11px 11px 0}.popover.right .arrow{top:50%;left:-10px;margin-top:-10px;border-right-color:#fff;border-width:10px 10px 10px 0}.popover.right .arrow:after{bottom:-11px;left:-1px;border-right-color:rgba(0,0,0,0.25);border-width:11px 11px 11px 0}.popover.bottom .arrow{top:-10px;left:50%;margin-left:-10px;border-bottom-color:#fff;border-width:0 10px 10px}.popover.bottom .arrow:after{top:-1px;left:-11px;border-bottom-color:rgba(0,0,0,0.25);border-width:0 11px 11px}.popover.left .arrow{top:50%;right:-10px;margin-top:-10px;border-left-color:#fff;border-width:10px 0 10px 10px}.popover.left .arrow:after{right:-1px;bottom:-11px;border-left-color:rgba(0,0,0,0.25);border-width:11px 0 11px 11px}.thumbnails{margin-left:-20px;list-style:none;*zoom:1}.thumbnails:before,.thumbnails:after{display:table;line-height:0;content:""}.thumbnails:after{clear:both}.row-fluid .thumbnails{margin-left:0}.thumbnails>li{float:left;margin-bottom:20px;margin-left:20px}.thumbnail{display:block;padding:4px;line-height:20px;border:1px solid #ddd;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px;-webkit-box-shadow:0 1px 3px rgba(0,0,0,0.055);-moz-box-shadow:0 1px 3px rgba(0,0,0,0.055);box-shadow:0 1px 3px rgba(0,0,0,0.055);-webkit-transition:all .2s ease-in-out;-moz-transition:all .2s ease-in-out;-o-transition:all .2s ease-in-out;transition:all .2s ease-in-out}a.thumbnail:hover{border-color:#08c;-webkit-box-shadow:0 1px 4px rgba(0,105,214,0.25);-moz-box-shadow:0 1px 4px rgba(0,105,214,0.25);box-shadow:0 1px 4px rgba(0,105,214,0.25)}.thumbnail>img{display:block;max-width:100%;margin-right:auto;margin-left:auto}.thumbnail .caption{padding:9px;color:#555}.label,.badge{font-size:11.844px;font-weight:bold;line-height:14px;color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.25);white-space:nowrap;vertical-align:baseline;background-color:#999}.label{padding:1px 4px 2px;-webkit-border-radius:3px;-moz-border-radius:3px;border-radius:3px}.badge{padding:1px 9px 2px;-webkit-border-radius:9px;-moz-border-radius:9px;border-radius:9px}a.label:hover,a.badge:hover{color:#fff;text-decoration:none;cursor:pointer}.label-important,.badge-important{background-color:#b94a48}.label-important[href],.badge-important[href]{background-color:#953b39}.label-warning,.badge-warning{background-color:#f89406}.label-warning[href],.badge-warning[href]{background-color:#c67605}.label-success,.badge-success{background-color:#468847}.label-success[href],.badge-success[href]{background-color:#356635}.label-info,.badge-info{background-color:#3a87ad}.label-info[href],.badge-info[href]{background-color:#2d6987}.label-inverse,.badge-inverse{background-color:#333}.label-inverse[href],.badge-inverse[href]{background-color:#1a1a1a}.btn .label,.btn .badge{position:relative;top:-1px}.btn-mini .label,.btn-mini .badge{top:0}@-webkit-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@-moz-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@-ms-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@-o-keyframes progress-bar-stripes{from{background-position:0 0}to{background-position:40px 0}}@keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 
0}}.progress{height:20px;margin-bottom:20px;overflow:hidden;background-color:#f7f7f7;background-image:-moz-linear-gradient(top,#f5f5f5,#f9f9f9);background-image:-webkit-gradient(linear,0 0,0 100%,from(#f5f5f5),to(#f9f9f9));background-image:-webkit-linear-gradient(top,#f5f5f5,#f9f9f9);background-image:-o-linear-gradient(top,#f5f5f5,#f9f9f9);background-image:linear-gradient(to bottom,#f5f5f5,#f9f9f9);background-repeat:repeat-x;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px;filter:progid:dximagetransform.microsoft.gradient(startColorstr='#fff5f5f5',endColorstr='#fff9f9f9',GradientType=0);-webkit-box-shadow:inset 0 1px 2px rgba(0,0,0,0.1);-moz-box-shadow:inset 0 1px 2px rgba(0,0,0,0.1);box-shadow:inset 0 1px 2px rgba(0,0,0,0.1)}.progress .bar{float:left;width:0;height:100%;font-size:12px;color:#fff;text-align:center;text-shadow:0 -1px 0 rgba(0,0,0,0.25);background-color:#0e90d2;background-image:-moz-linear-gradient(top,#149bdf,#0480be);background-image:-webkit-gradient(linear,0 0,0 100%,from(#149bdf),to(#0480be));background-image:-webkit-linear-gradient(top,#149bdf,#0480be);background-image:-o-linear-gradient(top,#149bdf,#0480be);background-image:linear-gradient(to bottom,#149bdf,#0480be);background-repeat:repeat-x;filter:progid:dximagetransform.microsoft.gradient(startColorstr='#ff149bdf',endColorstr='#ff0480be',GradientType=0);-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,0.15);-moz-box-shadow:inset 0 -1px 0 rgba(0,0,0,0.15);box-shadow:inset 0 -1px 0 rgba(0,0,0,0.15);-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;-webkit-transition:width .6s ease;-moz-transition:width .6s ease;-o-transition:width .6s ease;transition:width .6s ease}.progress .bar+.bar{-webkit-box-shadow:inset 1px 0 0 rgba(0,0,0,0.15),inset 0 -1px 0 rgba(0,0,0,0.15);-moz-box-shadow:inset 1px 0 0 rgba(0,0,0,0.15),inset 0 -1px 0 rgba(0,0,0,0.15);box-shadow:inset 1px 0 0 rgba(0,0,0,0.15),inset 0 -1px 0 rgba(0,0,0,0.15)}.progress-striped .bar{background-color:#149bdf;background-image:-webkit-gradient(linear,0 100%,100% 0,color-stop(0.25,rgba(255,255,255,0.15)),color-stop(0.25,transparent),color-stop(0.5,transparent),color-stop(0.5,rgba(255,255,255,0.15)),color-stop(0.75,rgba(255,255,255,0.15)),color-stop(0.75,transparent),to(transparent));background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-moz-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);-webkit-background-size:40px 40px;-moz-background-size:40px 40px;-o-background-size:40px 40px;background-size:40px 40px}.progress.active .bar{-webkit-animation:progress-bar-stripes 2s linear infinite;-moz-animation:progress-bar-stripes 2s linear infinite;-ms-animation:progress-bar-stripes 2s linear infinite;-o-animation:progress-bar-stripes 2s linear infinite;animation:progress-bar-stripes 2s linear infinite}.progress-danger .bar,.progress 
.bar-danger{background-color:#dd514c;background-image:-moz-linear-gradient(top,#ee5f5b,#c43c35);background-image:-webkit-gradient(linear,0 0,0 100%,from(#ee5f5b),to(#c43c35));background-image:-webkit-linear-gradient(top,#ee5f5b,#c43c35);background-image:-o-linear-gradient(top,#ee5f5b,#c43c35);background-image:linear-gradient(to bottom,#ee5f5b,#c43c35);background-repeat:repeat-x;filter:progid:dximagetransform.microsoft.gradient(startColorstr='#ffee5f5b',endColorstr='#ffc43c35',GradientType=0)}.progress-danger.progress-striped .bar,.progress-striped .bar-danger{background-color:#ee5f5b;background-image:-webkit-gradient(linear,0 100%,100% 0,color-stop(0.25,rgba(255,255,255,0.15)),color-stop(0.25,transparent),color-stop(0.5,transparent),color-stop(0.5,rgba(255,255,255,0.15)),color-stop(0.75,rgba(255,255,255,0.15)),color-stop(0.75,transparent),to(transparent));background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-moz-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent)}.progress-success .bar,.progress .bar-success{background-color:#5eb95e;background-image:-moz-linear-gradient(top,#62c462,#57a957);background-image:-webkit-gradient(linear,0 0,0 100%,from(#62c462),to(#57a957));background-image:-webkit-linear-gradient(top,#62c462,#57a957);background-image:-o-linear-gradient(top,#62c462,#57a957);background-image:linear-gradient(to bottom,#62c462,#57a957);background-repeat:repeat-x;filter:progid:dximagetransform.microsoft.gradient(startColorstr='#ff62c462',endColorstr='#ff57a957',GradientType=0)}.progress-success.progress-striped .bar,.progress-striped .bar-success{background-color:#62c462;background-image:-webkit-gradient(linear,0 100%,100% 0,color-stop(0.25,rgba(255,255,255,0.15)),color-stop(0.25,transparent),color-stop(0.5,transparent),color-stop(0.5,rgba(255,255,255,0.15)),color-stop(0.75,rgba(255,255,255,0.15)),color-stop(0.75,transparent),to(transparent));background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-moz-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent)}.progress-info .bar,.progress .bar-info{background-color:#4bb1cf;background-image:-moz-linear-gradient(top,#5bc0de,#339bb9);background-image:-webkit-gradient(linear,0 0,0 
100%,from(#5bc0de),to(#339bb9));background-image:-webkit-linear-gradient(top,#5bc0de,#339bb9);background-image:-o-linear-gradient(top,#5bc0de,#339bb9);background-image:linear-gradient(to bottom,#5bc0de,#339bb9);background-repeat:repeat-x;filter:progid:dximagetransform.microsoft.gradient(startColorstr='#ff5bc0de',endColorstr='#ff339bb9',GradientType=0)}.progress-info.progress-striped .bar,.progress-striped .bar-info{background-color:#5bc0de;background-image:-webkit-gradient(linear,0 100%,100% 0,color-stop(0.25,rgba(255,255,255,0.15)),color-stop(0.25,transparent),color-stop(0.5,transparent),color-stop(0.5,rgba(255,255,255,0.15)),color-stop(0.75,rgba(255,255,255,0.15)),color-stop(0.75,transparent),to(transparent));background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-moz-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent)}.progress-warning .bar,.progress .bar-warning{background-color:#faa732;background-image:-moz-linear-gradient(top,#fbb450,#f89406);background-image:-webkit-gradient(linear,0 0,0 100%,from(#fbb450),to(#f89406));background-image:-webkit-linear-gradient(top,#fbb450,#f89406);background-image:-o-linear-gradient(top,#fbb450,#f89406);background-image:linear-gradient(to bottom,#fbb450,#f89406);background-repeat:repeat-x;filter:progid:dximagetransform.microsoft.gradient(startColorstr='#fffbb450',endColorstr='#fff89406',GradientType=0)}.progress-warning.progress-striped .bar,.progress-striped .bar-warning{background-color:#fbb450;background-image:-webkit-gradient(linear,0 100%,100% 0,color-stop(0.25,rgba(255,255,255,0.15)),color-stop(0.25,transparent),color-stop(0.5,transparent),color-stop(0.5,rgba(255,255,255,0.15)),color-stop(0.75,rgba(255,255,255,0.15)),color-stop(0.75,transparent),to(transparent));background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-moz-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,0.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,0.15) 50%,rgba(255,255,255,0.15) 75%,transparent 75%,transparent)}.accordion{margin-bottom:20px}.accordion-group{margin-bottom:2px;border:1px solid #e5e5e5;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}.accordion-heading{border-bottom:0}.accordion-heading .accordion-toggle{display:block;padding:8px 15px}.accordion-toggle{cursor:pointer}.accordion-inner{padding:9px 15px;border-top:1px solid 
#e5e5e5}.carousel{position:relative;margin-bottom:20px;line-height:1}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel .item{position:relative;display:none;-webkit-transition:.6s ease-in-out left;-moz-transition:.6s ease-in-out left;-o-transition:.6s ease-in-out left;transition:.6s ease-in-out left}.carousel .item>img{display:block;line-height:1}.carousel .active,.carousel .next,.carousel .prev{display:block}.carousel .active{left:0}.carousel .next,.carousel .prev{position:absolute;top:0;width:100%}.carousel .next{left:100%}.carousel .prev{left:-100%}.carousel .next.left,.carousel .prev.right{left:0}.carousel .active.left{left:-100%}.carousel .active.right{left:100%}.carousel-control{position:absolute;top:40%;left:15px;width:40px;height:40px;margin-top:-20px;font-size:60px;font-weight:100;line-height:30px;color:#fff;text-align:center;background:#222;border:3px solid #fff;-webkit-border-radius:23px;-moz-border-radius:23px;border-radius:23px;opacity:.5;filter:alpha(opacity=50)}.carousel-control.right{right:15px;left:auto}.carousel-control:hover{color:#fff;text-decoration:none;opacity:.9;filter:alpha(opacity=90)}.carousel-caption{position:absolute;right:0;bottom:0;left:0;padding:15px;background:#333;background:rgba(0,0,0,0.75)}.carousel-caption h4,.carousel-caption p{line-height:20px;color:#fff}.carousel-caption h4{margin:0 0 5px}.carousel-caption p{margin-bottom:0}.hero-unit{padding:60px;margin-bottom:30px;background-color:#eee;-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px}.hero-unit h1{margin-bottom:0;font-size:60px;line-height:1;letter-spacing:-1px;color:inherit}.hero-unit p{font-size:18px;font-weight:200;line-height:30px;color:inherit}.pull-right{float:right}.pull-left{float:left}.hide{display:none}.show{display:block}.invisible{visibility:hidden}.affix{position:fixed} \ No newline at end of file diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/css/colors.css b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/css/colors.css deleted file mode 100644 index 8774c02..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/css/colors.css +++ /dev/null @@ -1,302 +0,0 @@ -/*** - - colors.css v2.0.0 - http://clrs.cc - @mrmrs - MIT License - -***/ -/* - - SKINS - - Backgrounds - - Colors - - Border colors - - SVG fills - - SVG Strokes - -*/ -/* Backgrounds */ -.bg-navy { - background-color: #001F3F; } - -.bg-blue { - background-color: #0074D9; } - -.bg-aqua { - background-color: #7FDBFF; } - -.bg-teal { - background-color: #39CCCC; } - -.bg-olive { - background-color: #3D9970; } - -.bg-green { - background-color: #2ECC40; } - -.bg-lime { - background-color: #01FF70; } - -.bg-yellow { - background-color: #FFDC00; } - -.bg-orange { - background-color: #FF851B; } - -.bg-red { - background-color: #FF4136; } - -.bg-fuchsia { - background-color: #F012BE; } - -.bg-purple { - background-color: #B10DC9; } - -.bg-maroon { - background-color: #85144B; } - -.bg-white { - background-color: #fff; } - -.bg-gray { - background-color: #aaa; } - -.bg-silver { - background-color: #ddd; } - -.bg-black { - background-color: #111; } - -/* Colors */ -.navy { - color: #001F3F; } - -.blue { - color: #0074D9; } - -.aqua { - color: #7FDBFF; } - -.teal { - color: #39CCCC; } - -.olive { - color: #3D9970; } - -.green { - color: #2ECC40; } - -.lime { - color: #01FF70; } - -.yellow { - color: #FFDC00; } - -.orange { - color: #FF851B; } - -.red { - color: #FF4136; } - -.fuchsia { - color: #F012BE; } - -.purple { - color: #B10DC9; } - -.maroon { - 
color: #85144B; } - -.white { - color: #fff; } - -.silver { - color: #ddd; } - -.gray { - color: #aaa; } - -.black { - color: #111; } - -/* Border colors - - Use with another border utility that sets border-width and style - i.e .border { border-width: 1px; border-style: solid; } -*/ -.border--navy { - border-color: #001F3F; } - -.border--blue { - border-color: #0074D9; } - -.border--aqua { - border-color: #7FDBFF; } - -.border--teal { - border-color: #39CCCC; } - -.border--olive { - border-color: #3D9970; } - -.border--green { - border-color: #2ECC40; } - -.border--lime { - border-color: #01FF70; } - -.border--yellow { - border-color: #FFDC00; } - -.border--orange { - border-color: #FF851B; } - -.border--red { - border-color: #FF4136; } - -.border--fuchsia { - border-color: #F012BE; } - -.border--purple { - border-color: #B10DC9; } - -.border--maroon { - border-color: #85144B; } - -.border--white { - border-color: #fff; } - -.border--gray { - border-color: #aaa; } - -.border--silver { - border-color: #ddd; } - -.border--black { - border-color: #111; } - -/* Fills for SVG */ -.fill-navy { - fill: #001F3F; } - -.fill-blue { - fill: #0074D9; } - -.fill-aqua { - fill: #7FDBFF; } - -.fill-teal { - fill: #39CCCC; } - -.fill-olive { - fill: #3D9970; } - -.fill-green { - fill: #2ECC40; } - -.fill-lime { - fill: #01FF70; } - -.fill-yellow { - fill: #FFDC00; } - -.fill-orange { - fill: #FF851B; } - -.fill-red { - fill: #FF4136; } - -.fill-fuchsia { - fill: #F012BE; } - -.fill-purple { - fill: #B10DC9; } - -.fill-maroon { - fill: #85144B; } - -.fill-white { - fill: #fff; } - -.fill-gray { - fill: #aaa; } - -.fill-silver { - fill: #ddd; } - -.fill-black { - fill: #111; } - -/* Strokes for SVG */ -.stroke-navy { - stroke: #001F3F; } - -.stroke-blue { - stroke: #0074D9; } - -.stroke-aqua { - stroke: #7FDBFF; } - -.stroke-teal { - stroke: #39CCCC; } - -.stroke-olive { - stroke: #3D9970; } - -.stroke-green { - stroke: #2ECC40; } - -.stroke-lime { - stroke: #01FF70; } - -.stroke-yellow { - stroke: #FFDC00; } - -.stroke-orange { - stroke: #FF851B; } - -.stroke-red { - stroke: #FF4136; } - -.stroke-fuchsia { - stroke: #F012BE; } - -.stroke-purple { - stroke: #B10DC9; } - -.stroke-maroon { - stroke: #85144B; } - -.stroke-white { - stroke: #fff; } - -.stroke-gray { - stroke: #aaa; } - -.stroke-silver { - stroke: #ddd; } - -.stroke-black { - stroke: #111; } - -/* PRETTIER LINKS */ -a { - text-decoration: none; - -webkit-transition: color .3s ease-in-out; - transition: color .3s ease-in-out; } - -a:link { - -webkit-transition: color .3s ease-in-out; - transition: color .3s ease-in-out; } - -a:visited { } - -a:hover { - color: #001F3F; - -webkit-transition: color .3s ease-in-out; - transition: color .3s ease-in-out; } - -a:active { - -webkit-transition: color .3s ease-in-out; - transition: color .3s ease-in-out; } diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/css/style.css b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/css/style.css deleted file mode 100644 index 82ff12c..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/css/style.css +++ /dev/null @@ -1,271 +0,0 @@ -/* main stylesheet */ - -@import url(http://fonts.googleapis.com/css?family=Signika); - -html { - overflow-y: scroll; -} - -body { - font-size: 15px; - font-family: HelveticaNeue, 'Helvetica Neue', Helvetica, Arial, sans-serif; - color: #332; -} - -h1, h2, h3, h4, h5 { - color: #332; - font-family: "Signika"; - font-weight: 400; - font-size: 1.4em; - line-height: 1.1; - margin-top: 30px; -} - 
-pre code { - font: 14px/19px Inconsolata, Monaco,"Lucida Console",Terminal,"Courier New",Courier; -} - -.figure { - text-align: center; -} - -.small .figure img { - height: 200px; -} - -.pagetitle .figure { - text-align: left !important; -} - -.pagetitle .figure img { - height: 36px; -} - -table{ - background:#fff; - border:1px solid #ccc; - border-width:2px; - border-collapse:collapse; - margin:5px 0 10px; - - margin-top: 20px; - margin-bottom: 20px; -} - -th, td{ - border:1px solid #ccc; - padding:3px 10px; - text-align:left; - vertical-align:top; -} - -tr.even td{ - background:#f7f7f7; -} - -th{ - background:#edeff0; -} - -td code { - border: 0px; -} - -img { - max-width: 100%; - height: auto; -} - -hr { - border: 0px; - height: 0; - border-bottom: 1px solid #ccc; - margin-bottom: 100px; -} - -/* Logo */ - -.logo { - text-align: center; -} - -.tagline { - font-family: Georgia; - font-size: 18px; - font-style: italic; - line-height: 1.45; - color: #383838; -} - -.author { -} - -.halfbreak { - padding-bottom: 100px; -} - -.break { - padding-bottom: 200px; -} - -/* TOC Links */ - -a { - color: #111111; - text-decoration: none; -} - -.body li a { - text-decoration: underline; -} - -/* Math */ - -.MathJax_Display { - padding-top: 20px; - padding-bottom: 20px; -} - -/* Body Links */ - -p a { - text-decoration: underline; -} - -li code, p code { - font-size: 12px; - border: 1px solid #ccc; - margin-left: 3px; - margin-right: 3px; - padding-left: 2px; - padding-right: 2px; -} - -/* */ - -.center { - text-align: center; -} - -.bigger img { - width: 120%; - height: 120%; -} - -pre { - font-size: 0.9em; - - margin-bottom: 18px; - margin-top: 18px; - - border-left: 1px solid #ccc; - -} - -h1 { - margin-top: 0px; -} - -.annotation { - font-size: 10pt; -} - -.annotation pre { - display: block; - margin: 0; - padding: 7px 10px; - overflow-x: auto; -} - -.annotation.span2 { - /* Override bootstrap */ - margin-left: 0px !important; - margin-top: 18px !important; -} - -.annotation pre code { - border: 0; - padding: 0; - background: transparent; -} - -blockquote { - border-left: 1px solid #ccc; - font-family: Georgia, serif; - font-size: 14px; - font-style: italic; - margin: 0.25em 0; - padding-left: 10px; - line-height: 1.45; - color: #383838; - left: 20px; -} - - -blockquote cite { - color: #999999; - font-size: 14px; - display: block; - margin-top: 5px; -} - -ul.sections { - list-style: none; - padding:0 0 5px 0; - margin:0; -} - -code.sourceCode { - padding: 0; - background: inherit; -} - -pre.sourceCode { - padding: 10px; -} - -ul.sections > li > div { - -moz-box-sizing: border-box; /* firefox */ - -ms-box-sizing: border-box; /* ie */ - -webkit-box-sizing: border-box; /* webkit */ - -khtml-box-sizing: border-box; /* konqueror */ - box-sizing: border-box; /* css3 */ -} - - -/* Make the naviation centered and larger on small screens */ -/*---------------------- (> 481px) ---------------------*/ - -@media only screen and (max-width: 481px) { - -} - -@media only screen and (min-width: 1025px) { - body { - padding: 10px; - } - - .side { - position: fixed; - width: 120px !important; - margin-left: 0px; - z-index: 1000; - } - - .side ul ul { - display: none; - } - - .side ul ul.active { - display: block; - } - - .side .active { - font-weight: bold; - } - - .body { - margin-left: 120px !important; - } - -} diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/diagrams/closed_hashing b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/diagrams/closed_hashing deleted file mode 100644 
index 5e901b5..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/diagrams/closed_hashing +++ /dev/null @@ -1,28 +0,0 @@
-
-
-
-    +----------------+
-    |"(keyi, valuei)"|         "(key, value) pairs are stored directly"
-    +----+-----------+         "into the array (no pointers)"
-         |                +--------+---------------------+
-         |                |        |                     |
-         |                |        |                     |
-         |                |        |                     |
-         |                |        |                     |
-         |                |        |                     |
-         |                +--------+---------------------+
-         +--------------->|  keyi  |       valuei        |
-           hasher(keyi)   +--------+---------------------+
-                          |        |                     |
-                          |        |                     |
-                          |        |                     |
-                          +--------+---------------------+
-                          |        |                     |
-                          +--------+---------------------+
-                          |        |                     |
-                          |        |                     |
-                          |        |                     |
-                          +--------+---------------------+
-
-                               absl::flat_hash_map
-
diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/diagrams/closed_hashing.svg b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/diagrams/closed_hashing.svg deleted file mode 100644 index 7c2bd24..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/diagrams/closed_hashing.svg +++ /dev/null @@ -1,133 +0,0 @@
[133 lines of SVG markup stripped to bare labels during extraction; the graphic renders the same closed-hashing diagram as the text version above.]
diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/diagrams/index_computation b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/diagrams/index_computation deleted file mode 100644 index e79cfc1..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/diagrams/index_computation +++ /dev/null @@ -1,29 +0,0 @@
-
-
-
-    +----------------+
-    |"(keyi, valuei)"|
-    +------+---------+
-           |
-           | hasher(keyi)                "Parallel Hash Map"
-           v                             "(with 8 submaps)"
-    +--------+-------------+             +----------------+
-    | h=0x7d84ea13707f4657 |             |    submap0     |
-    +---------+------------+             +----------------+
-              |                          |    submap1     |
-              | "(h ^ (h >> 3)) & 0x7"   +----------------+
-              v                          |    submap2     |
-         +----+----+                     +----------------+
-         |"idx = 5"|                     |    submap3     |
-         +----+----+                     +----------------+
-              |                          |    submap4     |
-              |                          +----------------+
-              +------------------------->|    submap5     |
-                                         +----------------+
-                                         |    submap6     |
-                                         +----------------+
-                                         |    submap7     |
-                                         +----------------+
-
-    "parallel_hash_map with 8 submaps, each submap is an absl::flat_hash_map"
-
diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/diagrams/index_computation.svg b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/diagrams/index_computation.svg deleted file mode 100644 index b986a93..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/diagrams/index_computation.svg +++ /dev/null @@ -1,196 +0,0 @@
[196 lines of SVG markup stripped to bare labels during extraction; the graphic renders the same submap index-computation diagram as the text version above.]
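To make the submap routing in the index_computation diagram concrete, here is a minimal, self-contained C++ sketch of that step. It is illustrative only: `ToySubmapRouter` is an invented name, and `std::unordered_map` stands in for the absl-style flat submaps (with optional per-submap locking) that the real parallel_hash_map uses.

```
#include <array>
#include <cstddef>
#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>

// Toy illustration of the index_computation diagram: hash the key once,
// then pick one of 8 submaps via (h ^ (h >> 3)) & 0x7. The real
// parallel_hash_map does this with flat-table submaps and per-submap
// mutexes; this sketch only demonstrates the routing itself.
template <typename K, typename V>
class ToySubmapRouter {
public:
    void insert(const K& key, V value) {
        submaps_[index_of(key)][key] = std::move(value);
    }
    const V* find(const K& key) const {
        const auto& m = submaps_[index_of(key)];
        auto it = m.find(key);
        return it == m.end() ? nullptr : &it->second;
    }
private:
    std::size_t index_of(const K& key) const {
        std::size_t h = std::hash<K>{}(key);  // h = hasher(keyi)
        return (h ^ (h >> 3)) & 0x7;          // idx in [0, 8)
    }
    std::array<std::unordered_map<K, V>, 8> submaps_;
};

int main() {
    ToySubmapRouter<std::string, int> m;
    m.insert("keyi", 42);
    if (const int* v = m.find("keyi"))
        std::cout << "valuei = " << *v << '\n';  // prints: valuei = 42
}
```

Folding the higher hash bits into the lower ones before masking (`h ^ (h >> 3)`) means the submap choice does not depend on the low bits alone, which the submap's own bucket selection will reuse.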
diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/closed_hashing.png b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/closed_hashing.png deleted file mode 100644 index 0662041..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/closed_hashing.png and /dev/null differ
diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/flat_mem_usage.gif b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/flat_mem_usage.gif deleted file mode 100644 index f14b098..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/flat_mem_usage.gif and /dev/null differ
diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/flat_mem_usage.png b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/flat_mem_usage.png deleted file mode 100644 index 800a53c..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/flat_mem_usage.png and /dev/null differ
diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/flat_par_mutex_4.PNG b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/flat_par_mutex_4.PNG deleted file mode 100644 index 4f7cb5a..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/flat_par_mutex_4.PNG and /dev/null differ
diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/flat_par_mutex_5.PNG b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/flat_par_mutex_5.PNG deleted file mode 100644 index e437872..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/flat_par_mutex_5.PNG and /dev/null differ
diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/flat_par_mutex_5_speed.PNG b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/flat_par_mutex_5_speed.PNG deleted file mode 100644 index 4768a20..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/flat_par_mutex_5_speed.PNG and /dev/null differ
diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/flat_par_mutex_6_speed.PNG b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/flat_par_mutex_6_speed.PNG deleted file mode 100644 index af1b2ae..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/flat_par_mutex_6_speed.PNG and /dev/null differ
diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/flat_par_speed.PNG b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/flat_par_speed.PNG deleted file mode 100644 index df36c83..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/flat_par_speed.PNG and /dev/null differ
diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/flat_peak.gif b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/flat_peak.gif deleted file mode 100644 index eefbad6..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/flat_peak.gif and /dev/null differ
diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/flat_peak.png b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/flat_peak.png deleted file mode 100644 index 167edb8..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/flat_peak.png and /dev/null differ
diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/hashtable_benchmarks.PNG b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/hashtable_benchmarks.PNG deleted file mode 100644 index d18a31b..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/hashtable_benchmarks.PNG and /dev/null differ
diff --git
a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/idx_computation_cost.PNG b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/idx_computation_cost.PNG deleted file mode 100644 index 7e78e8f..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/idx_computation_cost.PNG and /dev/null differ diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/index_computation.png b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/index_computation.png deleted file mode 100644 index 550d743..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/index_computation.png and /dev/null differ diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/lock_various_sizes.PNG b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/lock_various_sizes.PNG deleted file mode 100644 index e1a486e..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/lock_various_sizes.PNG and /dev/null differ diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/mt_stl_flat_par_both.PNG b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/mt_stl_flat_par_both.PNG deleted file mode 100644 index b78efe6..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/mt_stl_flat_par_both.PNG and /dev/null differ diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/mt_stl_flat_par_both_run2.PNG b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/mt_stl_flat_par_both_run2.PNG deleted file mode 100644 index 1e024b1..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/mt_stl_flat_par_both_run2.PNG and /dev/null differ diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/mt_stl_flat_par_mem.PNG b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/mt_stl_flat_par_mem.PNG deleted file mode 100644 index 8e34553..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/mt_stl_flat_par_mem.PNG and /dev/null differ diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/mt_stl_flat_par_mem_run2.PNG b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/mt_stl_flat_par_mem_run2.PNG deleted file mode 100644 index 27281e7..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/mt_stl_flat_par_mem_run2.PNG and /dev/null differ diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/mt_stl_flat_par_mem_run2_zoomed.PNG b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/mt_stl_flat_par_mem_run2_zoomed.PNG deleted file mode 100644 index bd56cbe..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/mt_stl_flat_par_mem_run2_zoomed.PNG and /dev/null differ diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/mt_stl_flat_par_mem_zoomed.PNG b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/mt_stl_flat_par_mem_zoomed.PNG deleted file mode 100644 index bd7d34c..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/mt_stl_flat_par_mem_zoomed.PNG and /dev/null differ diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/mt_stl_flat_par_speed.PNG b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/mt_stl_flat_par_speed.PNG deleted file mode 100644 index 4aece96..0000000 Binary files 
a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/mt_stl_flat_par_speed.PNG and /dev/null differ diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/mt_stl_flat_par_speed_run2.PNG b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/mt_stl_flat_par_speed_run2.PNG deleted file mode 100644 index 38d9882..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/mt_stl_flat_par_speed_run2.PNG and /dev/null differ diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/no_preselection.PNG b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/no_preselection.PNG deleted file mode 100644 index 22fa8d4..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/no_preselection.PNG and /dev/null differ diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/node_mem_usage.gif b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/node_mem_usage.gif deleted file mode 100644 index 77dec05..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/node_mem_usage.gif and /dev/null differ diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/node_mem_usage.png b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/node_mem_usage.png deleted file mode 100644 index 15cb1e0..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/node_mem_usage.png and /dev/null differ diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/node_peak.gif b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/node_peak.gif deleted file mode 100644 index 82e47ec..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/node_peak.gif and /dev/null differ diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/node_peak.png b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/node_peak.png deleted file mode 100644 index e818cb3..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/node_peak.png and /dev/null differ diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/par_align_test.png b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/par_align_test.png deleted file mode 100644 index b28ae35..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/par_align_test.png and /dev/null differ diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/par_mt_memory.PNG b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/par_mt_memory.PNG deleted file mode 100644 index c0c955b..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/par_mt_memory.PNG and /dev/null differ diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/par_mt_speed.PNG b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/par_mt_speed.PNG deleted file mode 100644 index f3ad52a..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/par_mt_speed.PNG and /dev/null differ diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/parallel_flat_peak.gif b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/parallel_flat_peak.gif deleted file mode 100644 index 45e8ee6..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/parallel_flat_peak.gif and /dev/null differ diff --git 
a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/parallel_flat_peak.png b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/parallel_flat_peak.png deleted file mode 100644 index 019104e..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/parallel_flat_peak.png and /dev/null differ diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/parallel_node_peak.gif b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/parallel_node_peak.gif deleted file mode 100644 index c968685..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/parallel_node_peak.gif and /dev/null differ diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/parallel_node_peak.png b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/parallel_node_peak.png deleted file mode 100644 index 9efca9a..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/parallel_node_peak.png and /dev/null differ diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/phash.png b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/phash.png deleted file mode 100644 index 18d1894..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/phash.png and /dev/null differ diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/phmap_logo.png b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/phmap_logo.png deleted file mode 100644 index 2331458..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/phmap_logo.png and /dev/null differ diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/spp_flat_par_both.png b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/spp_flat_par_both.png deleted file mode 100644 index ae040b1..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/spp_flat_par_both.png and /dev/null differ diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/stl_flat_both.PNG b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/stl_flat_both.PNG deleted file mode 100644 index 5799f5c..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/stl_flat_both.PNG and /dev/null differ diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/stl_flat_mem.PNG b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/stl_flat_mem.PNG deleted file mode 100644 index 96fb4b8..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/stl_flat_mem.PNG and /dev/null differ diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/stl_flat_par_both.PNG b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/stl_flat_par_both.PNG deleted file mode 100644 index c92bca1..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/stl_flat_par_both.PNG and /dev/null differ diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/stl_flat_par_mem.PNG b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/stl_flat_par_mem.PNG deleted file mode 100644 index 87e6eca..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/stl_flat_par_mem.PNG and /dev/null differ diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/stl_flat_par_mem_zoomed.PNG b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/stl_flat_par_mem_zoomed.PNG 
deleted file mode 100644 index 6424b7f..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/stl_flat_par_mem_zoomed.PNG and /dev/null differ diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/stl_flat_par_speed.PNG b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/stl_flat_par_speed.PNG deleted file mode 100644 index 12f18e3..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/stl_flat_par_speed.PNG and /dev/null differ diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/stl_flat_speed.PNG b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/stl_flat_speed.PNG deleted file mode 100644 index b7a0bce..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/img/stl_flat_speed.PNG and /dev/null differ diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/includes.hs b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/includes.hs deleted file mode 100644 index 1edf2dc..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/includes.hs +++ /dev/null @@ -1,65 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} - -import Text.Read -import Control.Monad.State -import Control.Monad -import Text.Pandoc -import Data.Monoid -import Control.Applicative - -import Text.Pandoc.JSON -import Text.Pandoc.Walk - -slice :: Int -> Int -> [a] -> [a] -slice from to xs = take (to - from + 1) (drop from xs) - -doSlice :: Block -> IO Block -doSlice cb@(CodeBlock (id, classes, namevals) contents) = do - res <- return $ do - upper <- readMaybe =<< lookup "upper" namevals - lower <- readMaybe =<< lookup "lower" namevals - file <- lookup "slice" namevals - return (upper, lower, file) - - case res of - Nothing -> return cb - Just (upper, lower, f) -> do - contents <- readFile f - let lns = unlines $ slice lower upper (lines contents) - return (CodeBlock (id, classes, namevals) lns) -doSlice x = return x - -doInclude :: Block -> IO Block -doInclude cb@(CodeBlock (id, classes, namevals) contents) = - case lookup "include" namevals of - Just f -> return . (CodeBlock (id, classes, namevals)) =<< readFile f - Nothing -> return cb -doInclude x = return x - -doHtml :: Block -> IO Block -doHtml cb@(CodeBlock (id, classes, namevals) contents) = - case lookup "literal" namevals of - Just f -> return . (RawBlock "html") =<< readFile f - Nothing -> return cb -doHtml x = return x - -injectLatexMacros :: Maybe Format -> Pandoc -> IO Pandoc -injectLatexMacros (Just fmt) p = do - macros <- readFile "latex_macros" - let block = - case fmt of - Format "html" -> - Div ("",[],[("style","display:none")]) . (:[]) - . Para . (:[]) . 
Math DisplayMath $ macros - Format "latex" -> RawBlock "latex" macros - Format "epub" -> RawBlock "latex" macros - _ -> RawBlock "latex" macros - return (Pandoc nullMeta [block] <> p) -injectLatexMacros _ _ = return mempty - -main :: IO () -main = toJSONFilter - ((\fmt -> injectLatexMacros fmt - >=> walkM doInclude - >=> walkM doSlice - >=> walkM doHtml) :: Maybe Format -> Pandoc -> IO Pandoc) diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/latex_macros b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/latex_macros deleted file mode 100644 index 396f08f..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/latex_macros +++ /dev/null @@ -1,15 +0,0 @@ -\newcommand{\andalso}{\quad\quad} -\newcommand{\infabbrev}[2]{\infax{#1 \quad\eqdef\quad #2}} -\newcommand{\infrule}[2]{\displaystyle \dfrac{#1}{#2}} -\newcommand{\ar}{\rightarrow} -\newcommand{\Int}{\mathtt{Int}} -\newcommand{\Bool}{\mathtt{Bool}} -\newcommand{\becomes}{\Downarrow} -\newcommand{\trule}[1]{(\textbf{#1})} -\newcommand{\FV}[1]{\mathtt{fv}(#1)} -\newcommand{\FTV}[1]{\mathtt{ftv}(#1)} -\newcommand{\BV}[1]{\mathtt{bv}(#1)} -\newcommand{\compiles}[1]{\text{C}\llbracket{#1}\rrbracket} -\newcommand{\exec}[1]{\text{E}\llbracket{#1}\rrbracket} -\renewcommand{\t}[1]{\mathtt{#1}} -\newcommand{\ite}[3]{\text{if }#1\text{ then }#2\text{ else }#3} diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/parallel_hashmap.html b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/parallel_hashmap.html deleted file mode 100644 index 9599923..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/parallel_hashmap.html +++ /dev/null @@ -1,343 +0,0 @@ - - - - - The Parallel Hashmap (Gregory Popovitch) - - - - - - - - - - - - - - - - -

The Parallel Hashmap

or Abseiling from the shoulders of giants - © Gregory Popovitch - March 10, 2019

[tl;dr] We present a novel hashmap design, the Parallel Hashmap. Built on top of Abseil's flat_hash_map, the Parallel Hashmap has lower space requirements, is nearly as fast as the underlying flat_hash_map, and can be used from multiple threads with high levels of concurrency.

A quick look at the current state of the art

If you haven't been living under a rock, you know that Google open sourced late last year their Abseil library, which includes a very efficient flat hash table implementation. The absl::flat_hash_map stores the values directly in a memory array, which avoids memory indirections (this is referred to as closed hashing).

[figure: closed_hashing]

Using parallel SSE2 instructions, the flat hash table is able to look up items by checking 16 slots in parallel, which allows the implementation to remain fast even when the table is filled to 87.5% capacity.
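
To make the 16-way probe concrete, here is a minimal illustrative sketch (not Abseil's actual code; the control-byte group layout and the match_group name are our assumptions) of matching one hash "tag" byte against 16 control bytes at once with SSE2:

```c++
// Illustrative only: compare one tag byte against a group of 16 control
// bytes in a single step, as described above.
#include <cstdint>
#include <emmintrin.h>  // SSE2 intrinsics

// Returns a 16-bit mask; bit i is set when ctrl[i] equals tag.
inline uint32_t match_group(const int8_t* ctrl, int8_t tag) {
    __m128i group = _mm_loadu_si128(reinterpret_cast<const __m128i*>(ctrl));
    __m128i eq    = _mm_cmpeq_epi8(_mm_set1_epi8(tag), group);
    return static_cast<uint32_t>(_mm_movemask_epi8(eq));
}
```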

The graphs below show a comparison of time and memory usage necessary to insert up to 100 million values (each value is composed of two 8-byte integers), between the default hashmap of Visual Studio 2017 (std::unordered_map), and Abseil's flat_hash_map:

[figure: stl_flat comparison]

On the bottom graph, we can see that, as expected, the Abseil flat_hash_map is significantly faster than the default stl implementation, typically about three times faster.

The peak memory usage issue

The top graph shows the memory usage for both tables.

I used a separate thread to monitor the memory usage, which made it possible to track the increased memory usage when the table resizes. Indeed, both tables have a peak memory usage that is significantly higher than the memory usage seen between insertions.

In the case of Abseil's flat_hash_map, the values are stored directly in a memory array. The memory usage is constant until the table needs to resize, which is why we see these horizontal sections of memory usage.

When the flat_hash_map reaches 87.5% occupancy, a new array of twice the size is allocated, the values are moved (rehashed) from the smaller to the larger array, and then the smaller array, now empty, is freed. So we see that during the resize, the occupancy is only one third of 87.5%, or 29.1%, and when the smaller array is released, occupancy is half of 87.5%, or 43.75%.
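
As a quick check of those occupancy figures (our notation: the old array has $s$ slots and holds $0.875\,s$ values; during the resize both the old array and its twice-as-large replacement are allocated):

$$\frac{0.875\,s}{s + 2s} = \frac{0.875}{3} \approx 29.1\%, \qquad \frac{0.875\,s}{2s} = \frac{0.875}{2} = 43.75\%$$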

The default STL implementation is also subject to this higher peak memory usage, since it is typically implemented with an array of buckets, each bucket having a pointer to a linked list of nodes containing the values. In order to maintain O(1) lookups, the array of buckets also needs to be resized as the table size grows, requiring 3x temporary memory for moving the old bucket array (1x) to the newly allocated, larger (2x) array. In between the bucket array resizes, the default STL implementation's memory usage grows at a constant rate as new values are added to the linked lists.

> Instead of having a separate linked list for each bucket, std::unordered_map implementations often use a single linked list (making iteration faster), with buckets pointing to locations within the single linked list. absl::node_hash_map, on the other hand, has each bucket pointing to a single value, and collisions are handled with open addressing like for the absl::flat_hash_map.

This peak memory usage can be the limiting factor for large tables. Suppose you are on a machine with 32 GB of ram, and the flat_hash_map needs to resize when you inserted 10 GB of values in it. 10 GB of values means the array size is 11.42 GB (resizing at 87.5% occupancy), and we need to allocate a new array of double size (22.85 GB), which obviously will not be possible on our 32 GB machine.
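
Spelling out the arithmetic behind that example: $10\,\text{GB}/0.875 \approx 11.42\,\text{GB}$ for the current array, $2 \times 11.42 \approx 22.85\,\text{GB}$ for its replacement, and both must coexist during the rehash, for a peak of about $34.3\,\text{GB}$ on a 32 GB machine.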

For my work developing mechanical engineering software, this has kept me from using flat hash maps, as the high peak memory usage was the limiting factor for the size of FE models which could be loaded on a given machine. So I used other types of maps, such as sparsepp or Google's cpp-btree.

When the Abseil library was open sourced, I started pondering the issue again. Compared to Google's old dense_hash_map which resized at 50% capacity, the new absl::flat_hash_map resizing at 87.5% capacity was more memory friendly, but it still had these significant peaks of memory usage when resizing.

If only there were a way to eliminate those peaks, the flat_hash_map would be close to perfect. But how?

The peak memory usage solution

Suddenly, it hit me. I had a solution. I would create a hash table that internally is made of an array of 16 hash tables (the submaps). When inserting or looking up an item, the index of the target submap would be decided by the hash of the value to insert. For example, for a given size_t hashval, the index for the internal submap would be computed with:

submap_index = (hashval ^ (hashval >> 4)) & 0xF;

providing an index between 0 and 15.
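
Here is a minimal self-contained sketch of that selection scheme (the kNumSubmaps constant and the submap_index name are ours, for illustration):

```c++
#include <cstddef>

constexpr std::size_t kNumSubmaps = 16;  // must be a power of two

// Fold some high bits into the low bits, then mask: the result is a
// submap index in [0, kNumSubmaps - 1].
inline std::size_t submap_index(std::size_t hashval) {
    return (hashval ^ (hashval >> 4)) & (kNumSubmaps - 1);
}
```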

> In the actual implementation, the size of the array of hash tables is configurable to a power of two, so it can be 2, 4, 8, 16, 32, ... The following illustration shows a parallel_hash_map with 8 submaps.

[figure: index_computation]

The benefit of this approach would be that each internal table would resize on its own when it reaches 87.5% capacity, and since each table contains approximately one sixteenth of the values, the memory usage peak would be only one sixteenth of the size we saw for the single flat_hash_map.
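
In general (our notation), if the whole table occupies $s$ bytes spread across $K$ submaps, a single submap resize only needs about

$$\frac{2s}{K}$$

extra bytes, instead of the $2s$ needed when one monolithic array is doubled; with $K = 16$ the resize peak shrinks to one sixteenth.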

The rest of this article describes my implementation of this concept inside the Abseil library (I have submitted a pull request in the hope it will be merged into the main Abseil codebase). The current name for it is parallel_flat_hash_map or parallel_flat_hash_set. It provides the same external API as Abseil's other hash tables, and internally it uses a std::array of N flat_hash_maps.

I was delighted to find out that not only does the parallel_flat_hash_map have significant memory usage benefits compared to the flat_hash_map, but it also has significant advantages for concurrent programming, as I will show later.

> I will use the names parallel_hash_map and parallel_flat_hash_map interchangeably. They refer to the same data structure. The name used in my Abseil fork is absl::parallel_flat_hash_map, as it may be desirable to also provide an absl::parallel_node_hash_map.

The Parallel Hashmap: memory usage

So, without further ado, let's see the same graphs as above, with the addition of the parallel_flat_hash_map. Let us first look at memory usage (the second graph provides a "zoomed-in" view of the location where resizing occurs):

[figure: stl_flat_par comparison]

[figure: stl_flat_par_zoomed comparison]

We see that the parallel_hash_map behaves as expected. The memory usage matches exactly the memory usage of its base flat_hash_map, except that the peaks of memory usage which occur when the table resizes are drastically reduced, to the point that they are not objectionable anymore. In the "zoomed-in" view, we can see the sixteen dots corresponding to each of the individual submaps resizing. The fact that those resizes are occurring at roughly the same x location in the graph shows that we have a good hash function distribution, distributing the values evenly between the sixteen individual submaps.

The Parallel Hashmap: speed

But what about the speed? After all, for each value inserted into the parallel hashmap, we have to do some extra work (steps 1 and 2 below):

1. compute the hash for the value to insert
2. compute the index of the target submap from the hash
3. insert the value into the submap

The first step (compute the hash) is the most problematic one, as it can potentially be costly. As we mentioned above, the second step (computing the index from the hash) is very simple and its cost is minimal (3 processor instructions, as shown below in Matt Godbolt's compiler explorer):

[figure: index computation cost]

As for the hash value computation, fortunately we can eliminate this cost by providing the computed hash to the submap functions, so that it is computed only once. This is exactly what I have done in my implementation of the parallel_hash_map within the Abseil library, adding a few extra APIs to the Abseil internal raw_hash_map.h header, which allow the parallel_hash_map to pass the precomputed hash value to the underlying submaps.

So we have all but eliminated the cost of the first step, and seen that the cost of the second step is very minimal. At this point we expect that the parallel_hash_map performance will be close to that of its underlying flat_hash_map, and this is confirmed by the chart below:

[figure: stl_flat_par comparison]

Indeed, because the scale is somewhat compressed due to the longer times of the std::unordered_map, we can barely distinguish between the blue curve of the flat_hash_map and the red curve of the parallel_hash_map. So let's look at a graph without the std::unordered_map:

[figure: flat_par comparison]

This last graph shows that the parallel_hash_map is slightly slower, especially for smaller table sizes. For a reason not obvious to me (maybe better memory locality), the speeds of the parallel_hash_map and flat_hash_map are essentially indistinguishable for larger map sizes (> 80 million values).

Are we done yet?

This is already looking pretty good. For large hash_maps, the parallel_flat_hash_map is a very appealing solution, as it provides essentially the excellent performance of the flat_hash_map, while virtually eliminating the peaks of memory usage which occur when the hash table resizes.

But there is another aspect of the inherent parallelism of the parallel_hash_map which is interesting to explore. As we know, typical hashmaps cannot be modified from multiple threads without explicit synchronization. And bracketing write accesses to a shared hash_map with synchronization primitives, such as mutexes, can reduce the concurrency of our program, and even cause deadlocks.

Because the parallel_hash_map is made of sixteen separate submaps, it possesses some intrinsic parallelism. Indeed, if you can make sure that different threads will use different submaps, you can insert into the same parallel_hash_map at the same time from different threads without any locking.

Using the intrinsic parallelism of the parallel_hash_map to insert values from multiple threads, lock free.

So, if you can iterate over the values you want to insert into the hash table, the idea is that each thread will iterate over all values, and then for each value:

1. compute the hash for that value
2. compute the submap index for that hash
3. if the submap index is one assigned to this thread, then insert the value, otherwise do nothing and continue to the next value

Here is the code for the single-threaded insert:
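
(This listing did not survive the HTML rendering; it is reconstructed from the markdown copy of the article further down. RSU is the benchmark's random-sequence generator, and num_keys a global array of per-thread insertion counters.)

```c++
template <class HT>
void _fill_random_inner(int64_t cnt, HT &hash, RSU &rsu)
{
    for (int64_t i=0; i<cnt; ++i)
    {
        hash.insert(typename HT::value_type(rsu.next(), 0)); // insert the next random key
        ++num_keys[0];                                       // increment count of inserted values
    }
}
```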

and here is the code for the multi-threaded insert:

```c++
template <class HT>
void _fill_random_inner_mt(int64_t cnt, HT &hash, RSU &rsu)
{
    constexpr int64_t num_threads = 8;   // has to be a power of two
    std::unique_ptr<std::thread> threads[num_threads];

    auto thread_fn = [&hash, cnt, num_threads](int64_t thread_idx, RSU rsu) {
        typename HT::hasher hasher;                         // get hasher object from the hash table
        size_t modulo = hash.subcnt() / num_threads;        // subcnt() returns the number of submaps

        for (int64_t i=0; i<cnt; ++i)                       // iterate over all values
        {
            unsigned int key = rsu.next();                  // get next key to insert
            size_t hashval = hash.hash(key);                // compute its hash
            size_t idx  = hash.subidx(hashval);             // compute the submap index for this hash
            if (idx / modulo == thread_idx)                 // if the submap is suitable for this thread
            {
                hash.insert(typename HT::value_type(key, 0)); // insert the value
                ++(num_keys[thread_idx]);                     // increment count of inserted values
            }
        }
    };

    // create and start 8 threads - each will insert in their own submaps
    // thread 0 will insert the keys whose hash directs them to submap0 or submap1
    // thread 1 will insert the keys whose hash directs them to submap2 or submap3
    // --------------------------------------------------------------------------
    for (int64_t i=0; i<num_threads; ++i)
        threads[i].reset(new std::thread(thread_fn, i, rsu));

    // rsu was passed to the threads by value, so advance our copy to keep it in sync
    for (int64_t i=0; i<cnt; ++i)
        rsu.next();

    // wait for the threads to finish their work and exit
    for (int64_t i=0; i<num_threads; ++i)
        threads[i]->join();
}
```

Using multiple threads, we are able to populate the parallel_flat_hash_map (inserting 100 million values) three times faster than the standard flat_hash_map (which we could not have populated from multiple threads without explicit locks, which would have prevented performance improvements).

And the graphical visualization of the results:

[figure: mt_stl_flat_par comparison]

We notice in this last graph that the memory usage peaks, while still smaller than those of the flat_hash_map, are larger than those we saw when populating the parallel_hash_map using a single thread. The obvious reason is that, when using a single thread, only one of the submaps would resize at a time, ensuring that the peak would only be 1/16th of the one for the flat_hash_map (provided of course that the hash function distributes the values somewhat evenly between the submaps).

When running in multi-threaded mode (in this case eight threads), potentially as many as eight submaps can resize simultaneously, so for a parallel_hash_map with sixteen submaps the memory peak size can be half as large as the one for the flat_hash_map.

Still, this is a pretty good result: we are now inserting values into our parallel_hash_map three times faster than we were able to do using the flat_hash_map, while using a lower memory ceiling.

This is significant, as the speed of insertion into a hash map is important in many algorithms, for example removing duplicates in a collection of values.

Using the intrinsic parallelism of the parallel_hash_map with internal mutexes

It may not be practical to add logic into your program to ensure you use different internal submaps from each thread. Still, locking the whole parallel_hash_map for each access would forego taking advantage of its intrinsic parallelism.

For that reason, the parallel_hash_map can provide internal locking using the absl::Mutex (the default template parameter is absl::NullMutex, which does no locking and has no size cost). When selecting absl::Mutex, one mutex is created for each internal submap at a cost of 8 bytes per submap, and the parallel_hash_map internally protects each submap access with its associated mutex.

| map | Number of submaps | sizeof(map) |
| :--- | :---: | ---: |
| std::unordered_map (vs2017) | - | 64 |
| absl::flat_hash_map | - | 48 |
| absl::parallel_flat_hash_map, N=4, absl::NullMutex | 16 | 768 |
| absl::parallel_flat_hash_map, N=4, absl::Mutex | 16 | 896 |

It is about time we provide the complete parallel_flat_hash_map class declaration (the declaration for parallel_flat_hash_set is similar):

```c++
template <class K, class V,
          class Hash      = absl::priv::hash_default_hash<K>,
          class Eq        = absl::priv::hash_default_eq<K>,
          class Allocator = std::allocator<std::pair<const K, V>>,
          size_t N        = 4,                 // 2**N submaps
          class Mutex     = absl::NullMutex>   // use absl::Mutex to enable internal locks
class parallel_flat_hash_map;
```
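
As a usage sketch (our example, assuming the fork's API exactly as declared above): choosing N=5 and absl::Mutex yields a map with 2**5 = 32 internally locked submaps that several threads can insert into concurrently:

```c++
// Sketch only; the include of the fork's parallel_flat_hash_map header
// and of absl::Mutex is omitted, since this API lives in the author's fork.
#include <cstdint>
#include <thread>
#include <vector>

using ConcurrentMap = absl::parallel_flat_hash_map<
    uint64_t, uint64_t,
    absl::priv::hash_default_hash<uint64_t>,
    absl::priv::hash_default_eq<uint64_t>,
    std::allocator<std::pair<const uint64_t, uint64_t>>,
    5,             // 2**5 = 32 submaps
    absl::Mutex>;  // internal per-submap locking

int main() {
    ConcurrentMap map;
    std::vector<std::thread> workers;
    for (uint64_t t = 0; t < 8; ++t)
        workers.emplace_back([&map, t] {
            for (uint64_t i = 0; i < 1000000; ++i)
                map.insert({t * 1000000 + i, i});  // per-submap mutex taken internally
        });
    for (auto &w : workers) w.join();
    return 0;
}
```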

Let's see what result we get for the insertion of random values from multiple threads; this time we create a parallel_hash_map with internal locking (by providing absl::Mutex as the last template argument), and modify the code so that each thread inserts values in any submap (no pre-selection).

[figure: no_preselection]

If we were to do an intensive insertion test into a hash map from multiple threads, where we lock the whole hash table for each insertion, we would be likely to get even worse results than for a single threaded insert, because of heavy lock contention.

In this case, our expectation is that the finer grained locking of the parallel_hash_map (separate locks for each internal submap) will provide a speed benefit when compared to the single threaded insertion, and this is indeed what the benchmarks show:

[figure: flat_par_mutex_4]

Interestingly, we notice that the memory peaks (when resizing occurs) are again very small, on the order of 1/16th of those for the flat_hash_map. This is likely because, as soon as one of the submaps resizes (which takes much longer than a regular insertion), the other threads very soon have to wait on the resizing submap's mutex for an insertion, before they reach their own resizing threshold.

Since threads statistically will insert on a different submap for each value, it would be a surprising coincidence indeed if two submaps reached their resizing threshold without the resizing of the first submap blocking all the other threads first.

If we increase the number of submaps, we should see more parallelism (less lock contention across threads, as the odds of two separate threads inserting in the same submap are lower), but with diminishing returns, as every submap resize will quickly block the other threads until the resize is completed.

This is indeed what we see:

[figure: lock_various_sizes]

| map | Number of submaps | sizeof(map) | time 100M insertions |
| :--- | :---: | ---: | ---: |
| absl::flat_hash_map | - | 48 | 14.77s |
| absl::parallel_flat_hash_map, N=4, absl::Mutex | 16 | 896 | 8.36s |
| absl::parallel_flat_hash_map, N=5, absl::Mutex | 32 | 1792 | 7.14s |
| absl::parallel_flat_hash_map, N=6, absl::Mutex | 64 | 3584 | 6.61s |

There is still some overhead from the mutex lock/unlock, and the occasional lock contention, which prevents us from reaching the performance of the previous multithreaded lock-free insertion (5.12s for inserting 100M elements).

In Conclusion

We have seen that the novel parallel hashmap approach, used within a single thread, provides significant space advantages with a very minimal time penalty. When used in a multi-thread context, the parallel hashmap still provides a significant space benefit, in addition to a substantial time benefit from reducing (or even eliminating) lock contention when accessing the parallel hashmap.

Future work

1. It would be beneficial to provide additional APIs for the parallel_flat_hash_map and parallel_flat_hash_set taking a precomputed hash value. This would enable the lock-free usage of the parallel_flat_hash_map, described above for multi-threaded environments, without requiring a double hash computation.

2. We may consider providing parallel_node_hash_map and parallel_node_hash_set in Abseil, for the cases when pointer stability is required for keys and/or values. This would be a simple addition.

Thanks

I would like to thank Google's Matt Kulukundis for his eye-opening presentation of the flat_hash_map design at CPPCON 2017 - my frustration with not being able to use it helped trigger my insight into the parallel_hash_map. Also many thanks to the Abseil container developers - I believe the main contributors are Alkis Evlogimenos and Roman Perepelitsa - who created an excellent codebase into which the graft of this new hashmap took easily, and finally to Google for open-sourcing Abseil. Thanks also to my son Andre for reviewing this paper, and for his patience when I was rambling about the parallel_hash_map and its benefits.

Links

Github repository for the benchmark code used in this paper

[Swiss Tables doc](https://abseil.io/blog/20180927-swisstables)

My fork of Google Abseil repository, with the parallel_flat_hash_map implementation

[Google Abseil repository](https://github.com/abseil/abseil-cpp)

[Matt Kulukundis: Designing a Fast, Efficient, Cache-friendly Hash Table, Step by Step](https://www.youtube.com/watch?v=ncHmEUmJZf4)
- - - - - - - diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/parallel_hashmap.md b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/parallel_hashmap.md deleted file mode 100644 index d0b5345..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/parallel_hashmap.md +++ /dev/null @@ -1,274 +0,0 @@ -# The Parallel Hashmap - or Abseiling from the shoulders of giants - © Gregory Popovitch - March 10, 2019 - -[tl;dr] We present a novel hashmap design, the Parallel Hashmap. Built on a modified version of Abseil's *flat_hash_map*, the Parallel Hashmap has lower space requirements, is nearly as fast as the underlying *flat_hash_map*, and can be used from multiple threads with high levels of concurrency. The [parallel hashmap](https://github.com/greg7mdp/parallel-hashmap) repository provides header-only version of the flat and node hashmaps, and their parallel versions as well. - -### A quick look at the current state of the art - -If you haven't been living under a rock, you know that Google open sourced late last year their Abseil library, which includes a very efficient flat hash table implementation. The *absl::flat_hash_map* stores the values directly in a memory array, which avoids memory indirections (this is referred to as closed hashing). - -![closed_hashing](https://github.com/greg7mdp/parallel-hashmap/blob/master/html/img/closed_hashing.png?raw=true) - -Using parallel SSE2 instructions, the flat hash table is able to look up items by checking 16 slots in parallel, which allows the implementation to remain fast even when the table is filled to 87.5% capacity. - -The graphs below show a comparison of time and memory usage necessary to insert up to 100 million values (each value is composed of two 8-byte integers), between the default hashmap of Visual Studio 2017 (std::unordered_map), and Abseil's flat_hash_map: - -![stl_flat comparison](https://github.com/greg7mdp/parallel-hashmap/blob/master/html/img/stl_flat_both.PNG?raw=true) - -On the bottom graph, we can see that, as expected, the Abseil *flat_hash_map* is significantly faster that the default stl implementation, typically about three times faster. - -### The peak memory usage issue - -The top graph shown the memory usage for both tables. - -I used a separate thread to monitor the memory usage, which allows to track the increased memory usage when the table resizes. Indeed, both tables have a peak memory usage that is significantly higher than the memory usage seen between insertions. - -In the case of Abseil's *flat_hash_map*, the values are stored directly in a memory array. The memory usage is constant until the table needs to resize, which is why we see these horizontal sections of memory usage. - -When the *flat_hash_map* reaches 87.5% occupancy, a new array of twice the size is allocated, the values are moved (rehashed) from the smaller to the larger array, and then the smaller array, now empty, is freed. So we see that during the resize, the occupancy is only one third of 87.5%, or 29.1%, and when the smaller array is released, occupancy is half of 87.5% or 43.75%. - -The default STL implementation is also subject to this higher peak memory usage, since it typically is implemented with an array of buckets, each bucket having a pointer to a linked list of nodes containing the values. 
In order to maintain O(1) lookups, the array of buckets also needs to be resized as the table size grows, requiring a 3x temporary memory requirement for moving the old bucket array (1x) to the newly allocated, larger (2x) array. In between the bucket array resizes, the default STL implementation memory usage grows at a constant rate as new values are added to the linked lists. - -> Instead of having a separate linked list for each bucket, *std::unordered_map* implementations often use a single linked list (making iteration faster), with buckets pointing to locations within the single linked list. *absl::node_hash_map*, on the other hand, has each bucket pointing to a single value, and collisions are handled with open addressing like for the *absl::flat_hash_map*. - -This peak memory usage can be the limiting factor for large tables. Suppose you are on a machine with 32 GB of ram, and the *flat_hash_map* needs to resize when you inserted 10 GB of values in it. 10 GB of values means the array size is 11.42 GB (resizing at 87.5% occupancy), and we need to allocate a new array of double size (22.85 GB), which obviously will not be possible on our 32 GB machine. - -For my work developing mechanical engineering software, this has kept me from using flat hash maps, as the high peak memory usage was the limiting factor for the size of FE models which could be loaded on a given machine. So I used other types of maps, such as [sparsepp](https://github.com/greg7mdp/sparsepp) or Google's [cpp-btree](https://code.google.com/archive/p/cpp-btree/). - -When the Abseil library was open sourced, I started pondering the issue again. Compared to Google's old dense_hash_map which resized at 50% capacity, the new *absl::flat_hash_map* resizing at 87.5% capacity was more memory friendly, but it still had these significant peaks of memory usage when resizing. - -If only there was a way to eliminate those peaks, the *flat_hash_map* would be close to perfect. But how? - -### The peak memory usage solution - -Suddenly, it hit me. I had a solution. I would create a hash table that internally is made of an array of 16 hash tables (the submaps). When inserting or looking up an item, the index of the target submap would be decided by the hash of the value to insert. For example, if for a given `size_t hashval`, the index for the internal submap would be computed with: - -`submap_index = (hashval ^ (hashval >> 4)) & 0xF;` - -providing an index between 0 and 15. - -> In the actual implementation, the size of the array of hash tables is configurable to a power of two, so it can be 2, 4, 8, 16, 32, ... The following illustration shows a parallel_hash_map with 8 submaps. - -![index_computation](https://github.com/greg7mdp/parallel-hashmap/blob/master/html/img/index_computation.png?raw=true) - -The benefit of this approach would be that the internal tables would each resize on its own when they reach 87.5% capacity, and since each table contains approximately one sixteenth of the values, the memory usage peak would be only one sixteenth of the size we saw for the single *flat_hash_map*. - -The rest of this article describes my implementation of this concept that I have done in my [parallel hashmap](https://github.com/greg7mdp/parallel-hashmap) repository. 
This is a header only library, which provides the following eight hashmaps: - -- phmap::flat_hash_set -- phmap::flat_hash_map -- phmap::node_hash_set -- phmap::node_hash_map -- phmap::parallel_flat_hash_set -- phmap::parallel_flat_hash_map -- phmap::parallel_node_hash_set -- phmap::parallel_node_hash_map - -This implementation requires a C++11 compatible compiler, and provides full compatibility with the std::unordered_map (with the exception of *pointer stability* for the `flat` versions. C++14 and C++17 methods, like `try-emplace`, are provided as well. -The names for it are *parallel_flat_hash_map* or *parallel_flat_hash_set*, and the *node* equivalents. These hashmaps provide the same external API as the *flat_hash_map*, and internally use a std::array of 2**N *flat_hash_maps*. - -I was delighted to find out that not only the *parallel_flat_hash_map* has significant memory usage benefits compared to the *flat_hash_map*, but it also has significant advantages for concurrent programming as I will show later. In the rest of this article, we will focus on the *parallel_flat_hash_map*, but similar results are seen for the *parallel_node_hash_map*, and the *set* versions of course. - - -### The Parallel Hashmap: memory usage - -So, without further ado, let's see the same graphs graphs as above, with the addition of the *parallel_flat_hash_map*. Let us first look at memory usage (the second graph provides a "zoomed-in" view of the location where resizing occurs): - -![stl_flat_par comparison](https://github.com/greg7mdp/parallel-hashmap/blob/master/html/img/stl_flat_par_mem.PNG?raw=true) - -![stl_flat_par_zoomed comparison](https://github.com/greg7mdp/parallel-hashmap/blob/master/html/img/stl_flat_par_mem_zoomed.PNG?raw=true) - -We see that the *parallel_flat_hash_map* behaves as expected. The memory usage matches exactly the memory usage of its base *flat_hash_map*, except that the peaks of memory usage which occur when the table resizes are drastically reduced, to the point that they are not objectionable anymore. In the "zoomed-in" view, we can see the sixteen dots corresponding to each of the individual submaps resizing. The fact that those resizes are occuring at roughly the same x location in the graph shows that we have a good hash function distribution, distributing the values evenly between the sixteen individual submaps. - - -### The Parallel Hashmap: speed - -But what about the speed? After all, for each value inserted into the parallel hashmap, we have to do some extra work (steps 1 and 2 below): -1. compute the hash for the value to insert -2. compute the index of the target submap from the hash) -3. insert the value into the submap - -The first step (compute the hash) is the most problematic one, as it can potentially be costly. As we mentioned above, the second step (computing the index from the hash) is very simple and its cost in minimal (3 processor instruction as shown below in *Matt Godbolt*'s compiler explorer): - -![index computation cost](https://github.com/greg7mdp/parallel-hashmap/blob/master/html/img/idx_computation_cost.PNG?raw=true) - -As for the hash value computation, fortunately we can eliminate this cost by providing the computed hash to the submap functions, so that it is computed only once. This is exactly what I have done in my implementation of the *parallel_flat_hash_map*, adding a few extra APIs to the internal raw_hash_map.h header, which allow the *parallel_flat_hash_map* to pass the precomputed hash value to the underlying submaps. 
- -So we have all but eliminated the cost of the first step, and seen that the cost of the second step is very minimal. At this point we expect that the *parallel_flat_hash_map* performance will be close to the one of its underlying *flat_hash_map*, and this is confirmed by the chart below: - -![stl_flat_par comparison](https://github.com/greg7mdp/parallel-hashmap/blob/master/html/img/stl_flat_par_speed.PNG?raw=true) - -Indeed, because of the scale is somewhat compressed due to the longer times of the std::unordered_map, we can barely distinguish between the blue curve of the *flat_hash_map* and the red curve of the *parallel_flat_hash_map*. So let's look at a graph without the std::unordered_map: - -![flat_par comparison](https://github.com/greg7mdp/parallel-hashmap/blob/master/html/img/flat_par_speed.PNG?raw=true) - -This last graph shows that the *parallel_flat_hash_map* is slightly slower especially for smaller table sizes. For a reason not obvious to me (maybe better memory locality), the speeds of the *parallel_flat_hash_map* and *flat_hash_map* are essentially undistinguishable for larger map sizes (> 80 million values). - -### Are we done yet? - -This is already looking pretty good. For large hash_maps, the *parallel_flat_hash_map* is a very appealing solution, as it provides essentially the excellent performance of the *flat_hash_map*, while virtually eliminating the peaks of memory usage which occur when the hash table resizes. - -But there is another aspect of the inherent parallelism of the *parallel_flat_hash_map* which is interesting to explore. As we know, typical hashmaps cannot be modified from multiple threads without explicit synchronization. And bracketing write accesses to a shared hash_map with synchronization primitives, such as mutexes, can reduce the concurrency of our program, and even cause deadlocks. - -Because the *parallel_flat_hash_map* is made of sixteen separate submaps, it posesses some intrinsic parallelism. Indeed, suppose you can make sure that different threads will use different submaps, you would be able to insert into the same *parallel_flat_hash_map* at the same time from the different threads without any locking. - -### Using the intrinsic parallelism of the *parallel_flat_hash_map* to insert values from multiple threads, lock free. - -So, if you can iterate over the values you want to insert into the hash table, the idea is that each thread will iterate over all values, and then for each value: - -1. compute the hash for that value -2. compute the submap index for that hash -3. if the submap index is one assigned to this thread, then insert the value, otherwise do nothing and continue to the next value - -Here is the code for the single-threaded insert: - -```c++ -template -void _fill_random_inner(int64_t cnt, HT &hash, RSU &rsu) -{ - for (int64_t i=0; i -void _fill_random_inner_mt(int64_t cnt, HT &hash, RSU &rsu) -{ - constexpr int64_t num_threads = 8; // has to be a power of two - std::unique_ptr threads[num_threads]; - - auto thread_fn = [&hash, cnt, num_threads](int64_t thread_idx, RSU rsu) { - size_t modulo = hash.subcnt() / num_threads; // subcnt() returns the number of submaps - - for (int64_t i=0; ijoin(); -} -``` - -Using multiple threads, we are able to populate the *parallel_flat_hash_map* (inserting 100 million values) three times faster than the standard *flat_hash_map* (which we could not have populated from multiple threads without explicit locks, which would have prevented performance improvements). 
- -And the graphical visualization of the results: - -![mt_stl_flat_par comparison](https://github.com/greg7mdp/parallel-hashmap/blob/master/html/img/mt_stl_flat_par_both_run2.PNG?raw=true) - -We notice in this last graph that the memory usage peaks, while still smaller than those of the *flat_hash_map*, are larger that those we saw when populating the *parallel_flat_hash_map* using a single thread. The obvious reason is that, when using a single thread, only one of the submaps would resize at a time, ensuring that the peak would only be 1/16th of the one for the *flat_hash_map* (provided of course that the hash function distributes the values somewhat evenly between the submaps). - -When running in multi-threaded mode (in this case eight threads), potentially as many as eight submaps can resize simultaneaously, so for a *parallel_flat_hash_map* with sixteen submaps the memory peak size can be half as large as the one for the *flat_hash_map*. - -Still, this is a pretty good result, we are now inserting values into our *parallel_flat_hash_map* three times faster than we were able to do using the *flat_hash_map*, while using a lower memory ceiling. - -This is significant, as the speed of insertion into a hash map is important in many algorithms, for example removing duplicates in a collection of values. - - -### Using the intrinsic parallelism of the *parallel_flat_hash_map* with internal mutexes - -It may not be practical to add logic into your program to ensure you use different internal submaps from each thread. Still, locking the whole *parallel_flat_hash_map* for each access would forego taking advantage of its intrinsic parallelism. - -For that reason, the *parallel_flat_hash_map* can provide internal locking using the `std::mutex` (the default template parameter is `phmap::NullMutex`, which does no locking and has no size cost). When selecting `std::mutex`, one mutex is created for each internal submap at a cost of 8 bytes per submap, and the *parallel_flat_hash_map* internally protects each submap access with its associated mutex. - - -| map | Number of submaps |sizeof(map) | -| :--- | :---: | ---: | -| std::unordered_map (vs2017) | - | 64 | -| phmap::flat_hash_map | - |48 | -| phmap::parallel_flat_hash_map, N=4, phmap::NullMutex | 16 |768 | -| phmap::parallel_flat_hash_map, N=4, std::mutex | 16 | 896 | - -It is about time we provide the complete parallel_flat_hash_map class declaration (the declaration for parallel_flat_hash_set is similar): - -``` -template , - class Eq = phmap::priv::hash_default_eq, - class Allocator = phmap::priv::Allocator>, // alias for std::allocator - size_t N = 4, // 2**N submaps - class Mutex = phmap::NullMutex> // use std::mutex to enable internal locks -class parallel_flat_hash_map; -``` - -Let's see what result we get for the insertion of random values from multiple threads, however this time we create a *parallel_flat_hash_map* with internal locking (by providing std::mutex as the last template argument), and modify the code so that each thread inserts values in any submap (no pre-selection). - -![no_preselection](https://github.com/greg7mdp/parallel-hashmap/blob/master/html/img/no_preselection.PNG?raw=true) - -If we were to do a intensive insertion test into a hash map from multiple threads, where we lock the whole hash table for each insertion, we would be likely to get even worse results than for a single threaded insert, because of heavy lock contention. 
- -In this case, our expectation is that the finer grained locking of the *parallel_flat_hash_map* (separate locks for each internal submap) will provide a speed benefit when compared to the single threaded insertion, and this is indeed what the benchmarks show: - -![flat_par_mutex_4](https://github.com/greg7mdp/parallel-hashmap/blob/master/html/img/flat_par_mutex_4.PNG?raw=true) - -Interestingly, we notice that the memory peaks (when resizing occur) are again very small, in the order of 1/16th of those for the *flat_hash_map*. This is likely because, as soon as one of the submaps resizes (which takes much longer than a regular insertion), the other threads very soon have to wait on the resizing submap's mutex for an insertion, before they reach their own resizing threashold. - -Since threads statistically will insert on a different submap for each value, it would be a surprising coincidence indeed if two submaps reached their resizing threshold without the resizing of the first submap blocking all the other threads first. - -If we increase the number of submaps, we should see more parallelism (less lock contention across threads, as the odds of two separate threads inserting in the same subhash is lower), but with diminishing returns as every submap resize will quickly block the other threads until the resize is completed. - -This is indeed what we see: - -![lock_various_sizes](https://github.com/greg7mdp/parallel-hashmap/blob/master/html/img/lock_various_sizes.PNG?raw=true) - -| map | Number of submaps |sizeof(map) | time 100M insertions | -| :--- | :---: | ---: | ---: | -| phmap::flat_hash_map | - |48 | 14.77s | -| phmap::parallel_flat_hash_map, N=4, std::mutex | 16 | 896 | 8.36s | -| phmap::parallel_flat_hash_map, N=5, std::mutex | 32 | 1792 | 7.14s | -| phmap::parallel_flat_hash_map, N=6, std::mutex | 64 | 3584 | 6.61s | - -There is still some overhead from the mutex lock/unlock, and the occasional lock contention, which prevents us from reaching the performance of the previous multithreaded lock-free insertion (5.12s for inserting 100M elements). - - -### In Conclusion - -We have seen that the novel parallel hashmap approach, used within a single thread, provides significant space advantages, with a very minimal time penalty. When used in a multi-thread context, the parallel hashmap still provides a significant space benefit, in addition to a consequential time benefit by reducing (or even eliminating) lock contention when accessing the parallel hashmap. - - -### Future work - -1. It would be beneficial to provide additional APIs for the *parallel_flat_hash_map* and *parallel_flat_hash_set* taking a precomputed hash value. This would enable the lock-free usage of the *parallel_flat_hash_map*, described above for multi-threaded environments, without requiring a double hash computation. - - -### Thanks - -I would like to thank Google's *Matt Kulukundis* for his eye-opening presentation of the *flat_hash_map* design at CPPCON 2017 - my frustration with not being able to use it helped trigger my insight into the *parallel_flat_hash_map*. Also many thanks to the Abseil container developers - I believe the main contributors are *Alkis Evlogimenos* and *Roman Perepelitsa* - who created an excellent codebase into which the graft of this new hashmap took easily, and finally to Google for open-sourcing Abseil. Thanks also to my son *Andre* for reviewing this paper, and for his patience when I was rambling about the *parallel_flat_hash_map* and its benefits. 
- - -### Links - -[Repository for the Parallel Hashmap, including the benchmark code used in this paper](https://github.com/greg7mdp/parallel-hashmap) - -[Swiss Tables doc](https://abseil.io/blog/20180927-swisstables) - -[Google Abseil repository](https://github.com/abseil/abseil-cpp) - -[Matt Kulukindis: Designing a Fast, Efficient, Cache-friendly Hash Table, Step by Step](https://www.youtube.com/watch?v=ncHmEUmJZf4) - diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/parallel_hashmap.pdf b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/parallel_hashmap.pdf deleted file mode 100644 index 28d05c0..0000000 Binary files a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/parallel_hashmap.pdf and /dev/null differ diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/template.html b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/template.html deleted file mode 100644 index a48f9d2..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/template.html +++ /dev/null @@ -1,50 +0,0 @@ - - - - - The Parallel Hashmap (Gregory Popovitch) - - - - - - - - - - - $if(highlighting-css)$ - - $endif$ - $for(css)$ - - $endfor$ - $if(math)$ - $if(html5)$ - $else$ - $math$ - $endif$ - $endif$ - $for(header-includes)$ - $header-includes$ - $endfor$ - - - - -
- - - diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/template.latex b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/template.latex deleted file mode 100644 index e78902e..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/html/template.latex +++ /dev/null @@ -1,308 +0,0 @@ -\documentclass[$if(fontsize)$$fontsize$,$endif$$if(lang)$$lang$,$endif$$if(papersize)$$papersize$,$endif$$for(classoption)$$classoption$$sep$,$endfor$]{$documentclass$} -\usepackage{geometry} -\usepackage{xcolor} -\usepackage{graphicx} -\usepackage[labelformat=empty]{caption} -\usepackage{afterpage} - -\newcommand\blankpage{% - \null - \thispagestyle{empty}% - \addtocounter{page}{-1}% - \newpage} - -$if(fontfamily)$ -\usepackage{$fontfamily$} -$else$ -\usepackage{lmodern} -$endif$ -$if(linestretch)$ -\usepackage{setspace} -\setstretch{$linestretch$} -$endif$ -\usepackage{amssymb,amsmath} -\usepackage{ifxetex,ifluatex} -\usepackage{fixltx2e} % provides \textsubscript -\ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex - \usepackage[T1]{fontenc} - \usepackage[utf8]{inputenc} -$if(euro)$ - \usepackage{eurosym} -$endif$ -\else % if luatex or xelatex - \ifxetex - \usepackage{mathspec} - \usepackage{xltxtra,xunicode} - \else - \usepackage{fontspec} - \fi - \defaultfontfeatures{Mapping=tex-text,Scale=MatchLowercase} - \newcommand{\euro}{€} -$if(mainfont)$ - \setmainfont{$mainfont$} -$endif$ -$if(sansfont)$ - \setsansfont{$sansfont$} -$endif$ -$if(monofont)$ - %\setmonofont[Mapping=tex-ansi]{$monofont$} - % custom override -$endif$ - -$if(mathfont)$ - \setmathfont(Digits,Latin,Greek){$mathfont$} -$endif$ - -\usepackage{fontspec} -\setmainfont[Ligatures=Common, - ItalicFont={Adobe Garamond Pro Italic}] - {Adobe Garamond Pro} -\setmonofont[Ligatures=NoCommon]{Source Code Pro} - -\fi -% use upquote if available, for straight quotes in verbatim environments -\IfFileExists{upquote.sty}{\usepackage{upquote}}{} -% use microtype if available -\IfFileExists{microtype.sty}{% -\usepackage{microtype} -\UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts -}{} -$if(geometry)$ -\usepackage[$for(geometry)$$geometry$$sep$,$endfor$]{geometry} -$endif$ -$if(lang)$ -\ifxetex - \usepackage{polyglossia} - \setmainlanguage{$mainlang$} -\else - \usepackage[shorthands=off,$lang$]{babel} -\fi -$endif$ -$if(natbib)$ -\usepackage{natbib} -\bibliographystyle{$if(biblio-style)$$biblio-style$$else$plainnat$endif$} -$endif$ -$if(biblatex)$ -\usepackage{biblatex} -$if(biblio-files)$ -\bibliography{$biblio-files$} -$endif$ -$endif$ -$if(listings)$ -\usepackage{listings} -$endif$ -$if(lhs)$ -\lstnewenvironment{code}{\lstset{language=C++,basicstyle=\small\ttfamily}}{} -$endif$ -$if(highlighting-macros)$ -$highlighting-macros$ -$endif$ -$if(verbatim-in-note)$ -\usepackage{fancyvrb} -\VerbatimFootnotes -$endif$ -$if(tables)$ -\usepackage{longtable,booktabs} -$endif$ -$if(graphics)$ -\usepackage{graphicx} -% Redefine \includegraphics so that, unless explicit options are -% given, the image width will not exceed the width or the height of the page. -% Images get their normal width if they fit onto the page, but -% are scaled down if they would overflow the margins. 
-\makeatletter -\def\ScaleWidthIfNeeded{% - \ifdim\Gin@nat@width>\linewidth - \linewidth - \else - \Gin@nat@width - \fi -} -\def\ScaleHeightIfNeeded{% - \ifdim\Gin@nat@height>0.9\textheight - 0.9\textheight - \else - \Gin@nat@width - \fi -} -\makeatother -\setkeys{Gin}{width=\ScaleWidthIfNeeded,height=\ScaleHeightIfNeeded,keepaspectratio}% -$endif$ -\ifxetex - \usepackage[setpagesize=false, % page size defined by xetex - unicode=false, % unicode breaks when used with xetex - xetex]{hyperref} -\else - \usepackage[unicode=true]{hyperref} -\fi -\hypersetup{breaklinks=true, - bookmarks=true, - pdfauthor={$author-meta$}, - pdftitle={$title-meta$}, - colorlinks=true, - citecolor=$if(citecolor)$$citecolor$$else$blue$endif$, - urlcolor=$if(urlcolor)$$urlcolor$$else$blue$endif$, - linkcolor=$if(linkcolor)$$linkcolor$$else$magenta$endif$, - pdfborder={0 0 0}} -\urlstyle{same} % don't use monospace font for urls -$if(links-as-notes)$ -% Make links footnotes instead of hotlinks: -\renewcommand{\href}[2]{#2\footnote{\url{#1}}} -$endif$ -$if(strikeout)$ -\usepackage[normalem]{ulem} -% avoid problems with \sout in headers with hyperref: -\pdfstringdefDisableCommands{\renewcommand{\sout}{}} -$endif$ -\setlength{\parindent}{0pt} -\setlength{\parskip}{6pt plus 2pt minus 1pt} -\setlength{\emergencystretch}{3em} % prevent overfull lines -\providecommand{\tightlist}{% - \setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}} -$if(numbersections)$ -\setcounter{secnumdepth}{5} -$else$ -\setcounter{secnumdepth}{0} -$endif$ -$if(verbatim-in-note)$ -\VerbatimFootnotes % allows verbatim text in footnotes -$endif$ - -$if(title)$ -\title{$title$$if(subtitle)$\\\vspace{0.5em}{\large $subtitle$}$endif$} -$endif$ -$if(author)$ -\author{$for(author)$$author$$sep$ \and $endfor$} -$endif$ -\date{$date$} -$for(header-includes)$ -$header-includes$ -$endfor$ - -\begin{document} -$if(title)$ -% ---------- -% Title page -% ---------- - -\begin{titlepage} - -\definecolor{titlepagecolor}{cmyk}{1,.60,0,.40} -\definecolor{namecolor}{cmyk}{1,.50,0,.10} -\newgeometry{left=7.5cm} %defines the geometry for the titlepage -\pagecolor{titlepagecolor} -\begin{figure} - \centering - \includegraphics[width=3.2in]{img/c-plus-plus_logo.eps} -\end{figure} - -\color{white} -\makebox[0pt][l]{\rule{1.3\textwidth}{1pt}} -\par -\noindent -{\huge \textsf{C++ Insights}} -\par -\noindent -{\textit{\textsf{Some stuff that is good to know}}} -%\textbf{\textsf{Something}} \textcolor{namecolor}{\textsf{Else}} -\vfill -\noindent -%{\huge \textsf{C++ Insights}} -\vskip\baselineskip -\noindent -{\huge \textsf{Gregory Popovitch}} -\par -\textsf{Feb 2019 (Draft)} - -\end{titlepage} -\pagecolor{white} -\restoregeometry % restores the geometry - -% Filler page -%\null -%\thispagestyle{empty} -%\addtocounter{page}{-1} -%\newpage - -% Subtitle page -%\vfill{3in} -%\begin{centering} -%{\HUGE \textsf{C++ Insights}} -%\end{centering} - -%\thispagestyle{empty} -%\addtocounter{page}{-1} -%\newpage - -% Copyright page -\thispagestyle{empty} -\addtocounter{page}{-1} - -\begin{minipage}[b]{0.9\textwidth} -\footnotesize\raggedright -\setlength{\parskip}{0.5\baselineskip} - -\begin{textsf} -{\textbf{C++ Insights}}\\ -by Gregory Popovitch - -\par -Copyright \copyright\ 2019. 
\\ -\href{http://www.tbd.com}{www.tbd.com} - -\par -(c) copyright 2019, Steven Gregory Popovitch -\end{textsf} -\end{minipage} -\vspace*{2\baselineskip} -\cleardoublepage - -$endif$ -$if(abstract)$ -\begin{abstract} -$abstract$ -\end{abstract} -$endif$ - -$for(include-before)$ -$include-before$ - -$endfor$ -$if(toc)$ -{ -\hypersetup{linkcolor=black} -\setcounter{tocdepth}{$toc-depth$} -\tableofcontents -} -\pagebreak -$endif$ -$if(lot)$ -\listoftables -$endif$ -$if(lof)$ -\listoffigures -$endif$ -$body$ - -$if(natbib)$ -$if(biblio-files)$ -$if(biblio-title)$ -$if(book-class)$ -\renewcommand\bibname{$biblio-title$} -$else$ -\renewcommand\refname{$biblio-title$} -$endif$ -$endif$ -\bibliography{$biblio-files$} - -$endif$ -$endif$ -$if(biblatex)$ -\printbibliography$if(biblio-title)$[title=$biblio-title$]$endif$ - -$endif$ -$for(include-after)$ -$include-after$ - -$endfor$ -\end{document} diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/index.html b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/index.html deleted file mode 100644 index 4d269b9..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/index.html +++ /dev/null @@ -1,348 +0,0 @@ - - - - - The Parallel Hashmap (Gregory Popovitch) - - - - - - - - - - - - - - - - -
- -
- -
- -
-

\[\newcommand{\andalso}{\quad\quad} -\newcommand{\infabbrev}[2]{\infax{#1 \quad\eqdef\quad #2}} -\newcommand{\infrule}[2]{\displaystyle \dfrac{#1}{#2}} -\newcommand{\ar}{\rightarrow} -\newcommand{\Int}{\mathtt{Int}} -\newcommand{\Bool}{\mathtt{Bool}} -\newcommand{\becomes}{\Downarrow} -\newcommand{\trule}[1]{(\textbf{#1})} -\newcommand{\FV}[1]{\mathtt{fv}(#1)} -\newcommand{\FTV}[1]{\mathtt{ftv}(#1)} -\newcommand{\BV}[1]{\mathtt{bv}(#1)} -\newcommand{\compiles}[1]{\text{C}\llbracket{#1}\rrbracket} -\newcommand{\exec}[1]{\text{E}\llbracket{#1}\rrbracket} -\renewcommand{\t}[1]{\mathtt{#1}} -\newcommand{\ite}[3]{\text{if }#1\text{ then }#2\text{ else }#3} -\]

-
-

The Parallel Hashmap

-

or Abseiling from the shoulders of giants - © Gregory Popovitch - March 10, 2019

-

[tl;dr] We present a novel hashmap design, the Parallel Hashmap. Built on a modified version of Abseil's flat_hash_map, the Parallel Hashmap has lower space requirements, is nearly as fast as the underlying flat_hash_map, and can be used from multiple threads with high levels of concurrency. The parallel hashmap repository provides header-only version of the flat and node hashmaps, and their parallel versions as well.

-

A quick look at the current state of the art

-

If you haven't been living under a rock, you know that Google open sourced late last year their Abseil library, which includes a very efficient flat hash table implementation. The absl::flat_hash_map stores the values directly in a memory array, which avoids memory indirections (this is referred to as closed hashing).

-

closed_hashing

-

Using parallel SSE2 instructions, the flat hash table is able to look up items by checking 16 slots in parallel, which allows the implementation to remain fast even when the table is filled to 87.5% capacity.

-

The graphs below compare the time and memory usage needed to insert up to 100 million values (each value composed of two 8-byte integers) into the default hashmap of Visual Studio 2017 (std::unordered_map) and into Abseil's flat_hash_map:

-

stl_flat comparison

-

On the bottom graph, we can see that, as expected, the Abseil flat_hash_map is significantly faster than the default STL implementation, typically about three times faster.

-

The peak memory usage issue

-

The top graph shows the memory usage for both tables.

-

I used a separate thread to monitor the memory usage, which makes it possible to track the increased memory usage when the table resizes. Indeed, both tables have a peak memory usage that is significantly higher than the memory usage seen between insertions.

-

In the case of Abseil's flat_hash_map, the values are stored directly in a memory array. The memory usage is constant until the table needs to resize, which is why we see these horizontal sections of memory usage.

-

When the flat_hash_map reaches 87.5% occupancy, a new array of twice the size is allocated, the values are moved (rehashed) from the smaller to the larger array, and then the smaller array, now empty, is freed. So during the resize the combined allocation is three times the old array size, and the occupancy is only one third of 87.5%, or 29.1%; when the smaller array is released, occupancy is half of 87.5%, or 43.75%.

-

The default STL implementation is also subject to this higher peak memory usage, since it is typically implemented with an array of buckets, each bucket holding a pointer to a linked list of nodes containing the values. In order to maintain O(1) lookups, the array of buckets also needs to be resized as the table grows, causing a 3x temporary memory footprint while the old bucket array (1x) is moved to the newly allocated, larger (2x) array. In between bucket-array resizes, the default STL implementation's memory usage grows at a constant rate as new values are added to the linked lists.

-
-

Instead of having a separate linked list for each bucket, std::unordered_map implementations often use a single linked list (making iteration faster), with buckets pointing to locations within the single linked list. absl::node_hash_map, on the other hand, has each bucket pointing to a single value, and collisions are handled with open addressing like for the absl::flat_hash_map.

-
-

This peak memory usage can be the limiting factor for large tables. Suppose you are on a machine with 32 GB of RAM, and the flat_hash_map needs to resize after you have inserted 10 GB of values into it. 10 GB of values means the array size is 11.42 GB (resizing at 87.5% occupancy), and we need to allocate a new array of double that size (22.85 GB); since 11.42 GB + 22.85 GB is over 34 GB, this obviously will not be possible on our 32 GB machine.

-

For my work developing mechanical engineering software, this has kept me from using flat hash maps, as the high peak memory usage was the limiting factor for the size of the finite element (FE) models which could be loaded on a given machine. So I used other types of maps, such as sparsepp or Google's cpp-btree.

-

When the Abseil library was open sourced, I started pondering the issue again. Compared to Google's old dense_hash_map which resized at 50% capacity, the new absl::flat_hash_map resizing at 87.5% capacity was more memory friendly, but it still had these significant peaks of memory usage when resizing.

-

If only there was a way to eliminate those peaks, the flat_hash_map would be close to perfect. But how?

-

The peak memory usage solution

-

Suddenly, it hit me. I had a solution. I would create a hash table that internally is made of an array of 16 hash tables (the submaps). When inserting or looking up an item, the index of the target submap would be decided by the hash of the value to insert. For example, for a given size_t hashval, the index of the internal submap would be computed with:

-

submap_index = (hashval ^ (hashval >> 4)) & 0xF;

-

providing an index between 0 and 15.

-
-

In the actual implementation, the size of the array of hash tables is configurable to a power of two, so it can be 2, 4, 8, 16, 32, ... The following illustration shows a parallel_hash_map with 8 submaps.
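A minimal sketch of how this index computation generalizes to 2**N submaps (my own illustration; the library's actual code exposes this through the subidx() member function used in the benchmark code later in this article):

```cpp
#include <cstddef>

// Fold the next N bits of the hash into the low bits, then mask:
// for N=4 this is exactly (hashval ^ (hashval >> 4)) & 0xF.
template <size_t N>
size_t submap_index(size_t hashval) {
    constexpr size_t num_submaps = size_t(1) << N;  // 2**N submaps
    return (hashval ^ (hashval >> N)) & (num_submaps - 1);
}
```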

-
-

index_computation

-

The benefit of this approach would be that each internal table resizes on its own when it reaches 87.5% capacity, and since each table contains approximately one sixteenth of the values, the memory usage peak would be only about one sixteenth of the size we saw for the single flat_hash_map.
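As a back-of-the-envelope check (notation mine): if A is the total size of the value arrays and K the number of submaps, resizing the single flat_hash_map transiently needs the old array plus a new one twice its size, while the parallel version only duplicates one submap at a time:

\[
\text{peak}_{\text{flat}} \approx A + 2A = 3A
\qquad\text{vs.}\qquad
\text{peak}_{\text{parallel}} \approx A + \frac{2A}{K}
\]

so the transient overhead drops from 2A to 2A/K, i.e. to one sixteenth of it for K = 16.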

-

The rest of this article describes the implementation of this concept in my parallel hashmap repository. This is a header-only library, which provides the following eight hashmaps:

-
  • phmap::flat_hash_set
  • phmap::flat_hash_map
  • phmap::node_hash_set
  • phmap::node_hash_map
  • phmap::parallel_flat_hash_set
  • phmap::parallel_flat_hash_map
  • phmap::parallel_node_hash_set
  • phmap::parallel_node_hash_map
-

This implementation requires a C++11 compatible compiler, and provides full compatibility with std::unordered_map (with the exception of pointer stability for the flat versions). C++14 and C++17 methods, like try_emplace, are provided as well. The parallel versions are named parallel_flat_hash_map and parallel_flat_hash_set, plus the node equivalents. These hashmaps provide the same external API as the flat_hash_map, and internally use a std::array of 2**N flat_hash_maps.
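Since the external API mirrors std::unordered_map, switching is mostly a matter of changing the type. A minimal usage sketch (assuming the repository's phmap.h header is on the include path):

```cpp
#include <string>
#include <parallel_hashmap/phmap.h>

int main() {
    // Drop-in replacement for std::unordered_map<std::string, int>.
    phmap::parallel_flat_hash_map<std::string, int> counts;
    counts["apple"] = 3;
    counts.try_emplace("banana", 5);   // C++17-style method, also provided
    return counts.count("apple") ? 0 : 1;
}
```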

-

I was delighted to find out that not only does the parallel_flat_hash_map have significant memory usage benefits compared to the flat_hash_map, but it also has significant advantages for concurrent programming, as I will show later. In the rest of this article, we will focus on the parallel_flat_hash_map, but similar results are seen for the parallel_node_hash_map, and of course the set versions.

-

The Parallel Hashmap: memory usage

-

So, without further ado, let's see the same graphs as above, with the addition of the parallel_flat_hash_map. Let us first look at memory usage (the second graph provides a "zoomed-in" view of the location where resizing occurs):

-

stl_flat_par comparison

-

stl_flat_par_zoomed comparison

-

We see that the parallel_flat_hash_map behaves as expected. The memory usage matches exactly the memory usage of its base flat_hash_map, except that the peaks of memory usage which occur when the table resizes are drastically reduced, to the point that they are not objectionable anymore. In the "zoomed-in" view, we can see the sixteen dots corresponding to each of the individual submaps resizing. The fact that those resizes occur at roughly the same x location in the graph shows that we have a good hash function distribution, distributing the values evenly between the sixteen individual submaps.

-

The Parallel Hashmap: speed

-

But what about the speed? After all, for each value inserted into the parallel hashmap, we have to do some extra work (steps 1 and 2 below):

-
  1. compute the hash for the value to insert
  2. compute the index of the target submap from the hash
  3. insert the value into the submap
-

The first step (computing the hash) is the most problematic one, as it can potentially be costly. As we mentioned above, the second step (computing the index from the hash) is very simple and its cost is minimal (3 processor instructions, as shown below in Matt Godbolt's compiler explorer):

-

index computation cost

-

As for the hash value computation, fortunately we can eliminate this cost by providing the computed hash to the submap functions, so that it is computed only once. This is exactly what I have done in my implementation of the parallel_flat_hash_map, adding a few extra APIs to the internal raw_hash_map.h header, which allow the parallel_flat_hash_map to pass the precomputed hash value to the underlying submaps.
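Conceptually, the wrapper hashes once and hands the result down. The following toy (design and names entirely mine, for illustration only; the real implementation extends the internal raw hash table instead) shows the shape of the idea:

```cpp
#include <cstddef>
#include <functional>
#include <string>
#include <utility>
#include <vector>

// Toy "submap" whose insert accepts a precomputed hash.
struct ToySubmap {
    std::vector<std::pair<size_t, std::string>> slots;  // (hash, value)
    void insert_with_hash(size_t hashval, std::string v) {
        slots.emplace_back(hashval, std::move(v));      // stand-in for real probing
    }
};

// Toy outer set: the hash is computed exactly once, and used both to
// pick the submap and by the submap itself.
template <size_t N>
struct ToyParallelSet {
    static constexpr size_t kSubmaps = size_t(1) << N;
    ToySubmap submaps[kSubmaps];

    void insert(const std::string& v) {
        size_t hashval = std::hash<std::string>{}(v);             // hashed once
        size_t idx = (hashval ^ (hashval >> N)) & (kSubmaps - 1); // cheap index
        submaps[idx].insert_with_hash(hashval, v);                // hash reused
    }
};

int main() {
    ToyParallelSet<4> s;  // 16 toy submaps
    s.insert("hello");
}
```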

-

So we have all but eliminated the cost of the first step, and seen that the cost of the second step is very minimal. At this point we expect that the parallel_flat_hash_map's performance will be close to that of its underlying flat_hash_map, and this is confirmed by the chart below:

-

stl_flat_par comparison

-

Indeed, because the scale is somewhat compressed due to the longer times of the std::unordered_map, we can barely distinguish between the blue curve of the flat_hash_map and the red curve of the parallel_flat_hash_map. So let's look at a graph without the std::unordered_map:

-

flat_par comparison

-

This last graph shows that the parallel_flat_hash_map is slightly slower, especially for smaller table sizes. For a reason not obvious to me (maybe better memory locality), the speeds of the parallel_flat_hash_map and flat_hash_map are essentially indistinguishable for larger map sizes (> 80 million values).

-

Are we done yet?

-

This is already looking pretty good. For large hash_maps, the parallel_flat_hash_map is a very appealing solution, as it provides essentially the excellent performance of the flat_hash_map, while virtually eliminating the peaks of memory usage which occur when the hash table resizes.

-

But there is another aspect of the inherent parallelism of the parallel_flat_hash_map which is interesting to explore. As we know, typical hashmaps cannot be modified from multiple threads without explicit synchronization. And bracketing write accesses to a shared hash_map with synchronization primitives, such as mutexes, can reduce the concurrency of our program, and even cause deadlocks.

-

Because the parallel_flat_hash_map is made of sixteen separate submaps, it possesses some intrinsic parallelism. Indeed, if you can make sure that different threads will use different submaps, you can insert into the same parallel_flat_hash_map from different threads at the same time without any locking.

-

Using the intrinsic parallelism of the parallel_flat_hash_map to insert values from multiple threads, lock free.

-

So, if you can iterate over the values you want to insert into the hash table, the idea is that each thread will iterate over all values, and then for each value:

-
  1. compute the hash for that value
  2. compute the submap index for that hash
  3. if the submap index is one assigned to this thread, insert the value; otherwise do nothing and continue to the next value
-

Here is the code for the single-threaded insert:

- -
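(The original listing did not survive the HTML conversion; below is a minimal reconstruction consistent with the multi-threaded version that follows, where RSU is the benchmark's random-sequence generator and num_keys its insertion counters.)

```cpp
template <class HT>
void _fill_random_inner(int64_t cnt, HT &hash, RSU &rsu)
{
    for (int64_t i = 0; i < cnt; ++i)                 // iterate over all values
    {
        unsigned int key = rsu.next();                // get next key to insert
        hash.insert(typename HT::value_type(key, 0)); // insert the value
        ++(num_keys[0]);                              // count of inserted values
    }
}
```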

and here is the code for the multi-threaded insert:

-
template <class HT>
-void _fill_random_inner_mt(int64_t cnt, HT &hash, RSU &rsu)
-{
-    constexpr int64_t num_threads = 8;   // has to be a power of two
-    std::unique_ptr<std::thread> threads[num_threads];
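-    // note: num_keys (per-thread insertion counters) is declared elsewhere in the benchmark code (not shown here)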
-
-    auto thread_fn = [&hash, cnt, num_threads](int64_t thread_idx, RSU rsu) {
-        size_t modulo = hash.subcnt() / num_threads;        // subcnt() returns the number of submaps
-
-        for (int64_t i=0; i<cnt; ++i)                       // iterate over all values
-        {
-            unsigned int key = rsu.next();                  // get next key to insert
-            size_t hashval = hash.hash(key);                // compute its hash
-            size_t idx  = hash.subidx(hashval);             // compute the submap index for this hash
-            if (idx / modulo == thread_idx)                 // if the submap is suitable for this thread
-            {
-                hash.insert(typename HT::value_type(key, 0)); // insert the value
-                ++(num_keys[thread_idx]);                     // increment count of inserted values
-            }
-        }
-    };
-
-    // create and start 8 threads - each will insert in their own submaps
-    // thread 0 will insert the keys whose hash direct them to submap0 or submap1
-    // thread 1 will insert the keys whose hash direct them to submap2 or submap3
-    // --------------------------------------------------------------------------
-    for (int64_t i=0; i<num_threads; ++i)
-        threads[i].reset(new std::thread(thread_fn, i, rsu));
-
-    // rsu passed by value to threads... we need to increment the reference object
-    for (int64_t i=0; i<cnt; ++i)
-        rsu.next();
-    
-    // wait for the threads to finish their work and exit
-    for (int64_t i=0; i<num_threads; ++i)
-        threads[i]->join();
-}
-

Using multiple threads, we are able to populate the parallel_flat_hash_map (inserting 100 million values) three times faster than the standard flat_hash_map (which we could not have populated from multiple threads without explicit locks, and those locks would have prevented any performance improvement).

-

And the graphical visualization of the results:

-

mt_stl_flat_par comparison

-

We notice in this last graph that the memory usage peaks, while still smaller than those of the flat_hash_map, are larger than those we saw when populating the parallel_flat_hash_map from a single thread. The obvious reason is that, when using a single thread, only one of the submaps resizes at a time, ensuring that the peak is only 1/16th of the one for the flat_hash_map (provided, of course, that the hash function distributes the values somewhat evenly between the submaps).

-

When running in multi-threaded mode (in this case eight threads), potentially as many as eight submaps can resize simultaneously, so for a parallel_flat_hash_map with sixteen submaps the memory peak can be half as large as the one for the flat_hash_map.

-

Still, this is a pretty good result: we are now inserting values into our parallel_flat_hash_map three times faster than we were able to with the flat_hash_map, while using a lower memory ceiling.

-

This is significant, as the speed of insertion into a hash map is important in many algorithms, for example removing duplicates in a collection of values.

-

Using the intrinsic parallelism of the parallel_flat_hash_map with internal mutexes

-

It may not be practical to add logic to your program to ensure that each thread uses different internal submaps. Still, locking the whole parallel_flat_hash_map for each access would forgo the benefit of its intrinsic parallelism.

-

For that reason, the parallel_flat_hash_map can provide internal locking using std::mutex (the default template parameter is phmap::NullMutex, which does no locking and has no size cost). When selecting std::mutex, one mutex is created for each internal submap, at a cost of 8 bytes per submap, and the parallel_flat_hash_map internally protects each submap access with its associated mutex.

| map | Number of submaps | sizeof(map) |
| --- | --- | --- |
| std::unordered_map (vs2017) | - | 64 |
| phmap::flat_hash_map | - | 48 |
| phmap::parallel_flat_hash_map, N=4, phmap::NullMutex | 16 | 768 |
| phmap::parallel_flat_hash_map, N=4, phmap::Mutex | 16 | 896 |
-

It is about time we provide the complete parallel_flat_hash_map class declaration (the declaration for parallel_flat_hash_set is similar):

-
template <class K, class V,
-          class Hash      = phmap::priv::hash_default_hash<K>,
-          class Eq        = phmap::priv::hash_default_eq<K>,
-          class Allocator = phmap::priv::Allocator<std::pair<const K, V>>, // alias for std::allocator
-          size_t N        = 4,                 // 2**N submaps
-          class Mutex     = phmap::NullMutex>   // use std::mutex to enable internal locks
-class parallel_flat_hash_map;
-
-

Let's see what result we get for the insertion of random values from multiple threads, but this time with a parallel_flat_hash_map with internal locking (created by providing std::mutex as the last template argument), and with the code modified so that each thread inserts values into any submap (no pre-selection).
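The per-thread loop then collapses to plain inserts, with no submap pre-selection. A minimal sketch (my illustration, not the benchmark's exact code, using std::mt19937 as a stand-in for the benchmark's RSU generator):

```cpp
#include <cstdint>
#include <mutex>
#include <random>
#include <thread>
#include <vector>
#include <parallel_hashmap/phmap.h>

// N=4 gives 2**4 = 16 submaps, each protected by its own std::mutex.
using Map = phmap::parallel_flat_hash_map<
    unsigned int, int,
    phmap::priv::hash_default_hash<unsigned int>,
    phmap::priv::hash_default_eq<unsigned int>,
    std::allocator<std::pair<const unsigned int, int>>,
    4, std::mutex>;

int main() {
    Map hash;
    std::vector<std::thread> threads;
    for (int t = 0; t < 8; ++t)
        threads.emplace_back([&hash, t] {
            std::mt19937 gen(t);                       // stand-in for RSU
            for (int i = 0; i < 1000000; ++i)          // each thread inserts anywhere
                hash.insert({static_cast<unsigned int>(gen()), 0});  // submap mutex taken internally
        });
    for (auto& th : threads) th.join();
}
```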

-

no_preselection

-

If we were to do an intensive insertion test into a hash map from multiple threads, locking the whole hash table for each insertion, we would likely get even worse results than for a single-threaded insert, because of heavy lock contention.

-

In this case, our expectation is that the finer-grained locking of the parallel_flat_hash_map (separate locks for each internal submap) will provide a speed benefit compared to the single-threaded insertion, and this is indeed what the benchmarks show:

-

flat_par_mutex_4

-

Interestingly, we notice that the memory peaks (when resizes occur) are again very small, on the order of 1/16th of those for the flat_hash_map. This is likely because, as soon as one of the submaps resizes (which takes much longer than a regular insertion), the other threads very soon have to wait on the resizing submap's mutex for an insertion, before they reach their own resizing threshold.

-

Since threads statistically will insert on a different submap for each value, it would be a surprising coincidence indeed if two submaps reached their resizing threshold without the resizing of the first submap blocking all the other threads first.

-

If we increase the number of submaps, we should see more parallelism (less lock contention across threads, as the odds of two separate threads inserting into the same submap are lower), but with diminishing returns, as every submap resize will quickly block the other threads until the resize is completed.

-

This is indeed what we see:

-

lock_various_sizes

| map | Number of submaps | sizeof(map) | time, 100M insertions |
| --- | --- | --- | --- |
| phmap::flat_hash_map | - | 48 | 14.77s |
| phmap::parallel_flat_hash_map, N=4, std::mutex | 16 | 896 | 8.36s |
| phmap::parallel_flat_hash_map, N=5, std::mutex | 32 | 1792 | 7.14s |
| phmap::parallel_flat_hash_map, N=6, std::mutex | 64 | 3584 | 6.61s |
-

There is still some overhead from the mutex lock/unlock, and the occasional lock contention, which prevents us from reaching the performance of the previous multithreaded lock-free insertion (5.12s for inserting 100M elements).

-

In Conclusion

-

We have seen that the novel parallel hashmap approach, used within a single thread, provides significant space advantages with a very minimal time penalty. When used in a multi-threaded context, the parallel hashmap still provides a significant space benefit, in addition to a substantial time benefit from reducing (or even eliminating) lock contention when accessing the parallel hashmap.

-

Future work

-
  1. It would be beneficial to provide additional APIs for the parallel_flat_hash_map and parallel_flat_hash_set taking a precomputed hash value. This would enable the lock-free, multi-threaded usage of the parallel_flat_hash_map described above, without requiring a double hash computation.
-

Thanks

-

I would like to thank Google's Matt Kulukundis for his eye-opening presentation of the flat_hash_map design at CppCon 2017 - my frustration with not being able to use it helped trigger my insight into the parallel_flat_hash_map. Also, many thanks to the Abseil container developers - I believe the main contributors are Alkis Evlogimenos and Roman Perepelitsa - who created an excellent codebase into which the graft of this new hashmap took easily, and finally to Google for open-sourcing Abseil. Thanks also to my son Andre for reviewing this paper, and for his patience when I was rambling about the parallel_flat_hash_map and its benefits.

- -

Repository for the Parallel Hashmap, including the benchmark code used in this paper

-

Swiss Tables doc

-

Google Abseil repository

-

Matt Kulukundis: Designing a Fast, Efficient, Cache-friendly Hash Table, Step by Step

-
-
- -
- - - diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/parallel_hashmap/btree.h b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/parallel_hashmap/btree.h deleted file mode 100644 index 814f18a..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/parallel_hashmap/btree.h +++ /dev/null @@ -1,4080 +0,0 @@ -// --------------------------------------------------------------------------- -// Copyright (c) 2019, Gregory Popovitch - greg7mdp@gmail.com -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Includes work from abseil-cpp (https://github.com/abseil/abseil-cpp) -// with modifications. -// -// Copyright 2018 The Abseil Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// --------------------------------------------------------------------------- - -#ifndef PHMAP_BTREE_BTREE_CONTAINER_H_ -#define PHMAP_BTREE_BTREE_CONTAINER_H_ - -#ifdef _MSC_VER - #pragma warning(push) - - #pragma warning(disable : 4127) // conditional expression is constant - #pragma warning(disable : 4324) // structure was padded due to alignment specifier - #pragma warning(disable : 4355) // 'this': used in base member initializer list - #pragma warning(disable : 4365) // conversion from 'int' to 'const unsigned __int64', signed/unsigned mismatch - #pragma warning(disable : 4514) // unreferenced inline function has been removed - #pragma warning(disable : 4623) // default constructor was implicitly defined as deleted - #pragma warning(disable : 4625) // copy constructor was implicitly defined as deleted - #pragma warning(disable : 4626) // assignment operator was implicitly defined as deleted - #pragma warning(disable : 4710) // function not inlined - #pragma warning(disable : 4711) // selected for automatic inline expansion - #pragma warning(disable : 4820) // '6' bytes padding added after data member - #pragma warning(disable : 4868) // compiler may not enforce left-to-right evaluation order in braced initializer list - #pragma warning(disable : 5026) // move constructor was implicitly defined as deleted - #pragma warning(disable : 5027) // move assignment operator was implicitly defined as deleted - #pragma warning(disable : 5045) // Compiler will insert Spectre mitigation for memory load if /Qspectre switch specified -#endif - - -#include -#include -#include -#include -#include - -#include "phmap_fwd_decl.h" -#include "phmap_base.h" - -#if PHMAP_HAVE_STD_STRING_VIEW - #include -#endif - -// MSVC constructibility traits do not detect destructor properties 
and so our -// implementations should not use them as a source-of-truth. -#if defined(_MSC_VER) && !defined(__clang__) && !defined(__GNUC__) - #define PHMAP_META_INTERNAL_STD_CONSTRUCTION_TRAITS_DONT_CHECK_DESTRUCTION 1 -#endif - -namespace phmap { - - // Defined and documented later on in this file. - template - struct is_trivially_destructible; - - // Defined and documented later on in this file. - template - struct is_trivially_move_assignable; - - namespace type_traits_internal { - - // Silence MSVC warnings about the destructor being defined as deleted. -#if defined(_MSC_VER) && !defined(__GNUC__) - #pragma warning(push) - #pragma warning(disable : 4624) -#endif // defined(_MSC_VER) && !defined(__GNUC__) - - template - union SingleMemberUnion { - T t; - }; - - // Restore the state of the destructor warning that was silenced above. -#if defined(_MSC_VER) && !defined(__GNUC__) - #pragma warning(pop) -#endif // defined(_MSC_VER) && !defined(__GNUC__) - - template - struct IsTriviallyMoveConstructibleObject - : std::integral_constant< - bool, std::is_move_constructible< - type_traits_internal::SingleMemberUnion>::value && - phmap::is_trivially_destructible::value> {}; - - template - struct IsTriviallyCopyConstructibleObject - : std::integral_constant< - bool, std::is_copy_constructible< - type_traits_internal::SingleMemberUnion>::value && - phmap::is_trivially_destructible::value> {}; - - template - struct IsTriviallyMoveAssignableReference : std::false_type {}; - - template - struct IsTriviallyMoveAssignableReference - : phmap::is_trivially_move_assignable::type {}; - - template - struct IsTriviallyMoveAssignableReference - : phmap::is_trivially_move_assignable::type {}; - - } // namespace type_traits_internal - - - template - using void_t = typename type_traits_internal::VoidTImpl::type; - - - template - struct is_function - : std::integral_constant< - bool, !(std::is_reference::value || - std::is_const::type>::value)> {}; - - - namespace type_traits_internal { - - template - class is_trivially_copyable_impl { - using ExtentsRemoved = typename std::remove_all_extents::type; - static constexpr bool kIsCopyOrMoveConstructible = - std::is_copy_constructible::value || - std::is_move_constructible::value; - static constexpr bool kIsCopyOrMoveAssignable = - phmap::is_copy_assignable::value || - phmap::is_move_assignable::value; - - public: - static constexpr bool kValue = - (__has_trivial_copy(ExtentsRemoved) || !kIsCopyOrMoveConstructible) && - (__has_trivial_assign(ExtentsRemoved) || !kIsCopyOrMoveAssignable) && - (kIsCopyOrMoveConstructible || kIsCopyOrMoveAssignable) && - is_trivially_destructible::value && - // We need to check for this explicitly because otherwise we'll say - // references are trivial copyable when compiled by MSVC. - !std::is_reference::value; - }; - - template - struct is_trivially_copyable - : std::integral_constant< - bool, type_traits_internal::is_trivially_copyable_impl::kValue> {}; - } // namespace type_traits_internal - - namespace swap_internal { - - // Necessary for the traits. - using std::swap; - - // This declaration prevents global `swap` and `phmap::swap` overloads from being - // considered unless ADL picks them up. - void swap(); - - template - using IsSwappableImpl = decltype(swap(std::declval(), std::declval())); - - // NOTE: This dance with the default template parameter is for MSVC. 
- template (), std::declval()))>> - using IsNothrowSwappableImpl = typename std::enable_if::type; - - template - struct IsSwappable - : phmap::type_traits_internal::is_detected {}; - - template - struct IsNothrowSwappable - : phmap::type_traits_internal::is_detected {}; - - template ::value, int> = 0> - void Swap(T& lhs, T& rhs) noexcept(IsNothrowSwappable::value) { - swap(lhs, rhs); - } - - using StdSwapIsUnconstrained = IsSwappable; - - } // namespace swap_internal - - namespace type_traits_internal { - - // Make the swap-related traits/function accessible from this namespace. - using swap_internal::IsNothrowSwappable; - using swap_internal::IsSwappable; - using swap_internal::Swap; - using swap_internal::StdSwapIsUnconstrained; - - } // namespace type_traits_internal - - namespace compare_internal { - - using value_type = int8_t; - - template - struct Fail { - static_assert(sizeof(T) < 0, "Only literal `0` is allowed."); - }; - - template - struct OnlyLiteralZero { - constexpr OnlyLiteralZero(NullPtrT) noexcept {} // NOLINT - - template < - typename T, - typename = typename std::enable_if< - std::is_same::value || - (std::is_integral::value && !std::is_same::value)>::type, - typename = typename Fail::type> - OnlyLiteralZero(T); // NOLINT - }; - - enum class eq : value_type { - equal = 0, - equivalent = equal, - nonequal = 1, - nonequivalent = nonequal, - }; - - enum class ord : value_type { less = -1, greater = 1 }; - - enum class ncmp : value_type { unordered = -127 }; - -#if defined(__cpp_inline_variables) && !defined(_MSC_VER) - -#define PHMAP_COMPARE_INLINE_BASECLASS_DECL(name) - -#define PHMAP_COMPARE_INLINE_SUBCLASS_DECL(type, name) \ - static const type name; - -#define PHMAP_COMPARE_INLINE_INIT(type, name, init) \ - inline constexpr type type::name(init) - -#else // __cpp_inline_variables - -#define PHMAP_COMPARE_INLINE_BASECLASS_DECL(name) \ - static const T name; - -#define PHMAP_COMPARE_INLINE_SUBCLASS_DECL(type, name) - -#define PHMAP_COMPARE_INLINE_INIT(type, name, init) \ - template \ - const T compare_internal::type##_base::name(init) - -#endif // __cpp_inline_variables - - // These template base classes allow for defining the values of the constants - // in the header file (for performance) without using inline variables (which - // aren't available in C++11). 
- template - struct weak_equality_base { - PHMAP_COMPARE_INLINE_BASECLASS_DECL(equivalent) - PHMAP_COMPARE_INLINE_BASECLASS_DECL(nonequivalent) - }; - - template - struct strong_equality_base { - PHMAP_COMPARE_INLINE_BASECLASS_DECL(equal) - PHMAP_COMPARE_INLINE_BASECLASS_DECL(nonequal) - PHMAP_COMPARE_INLINE_BASECLASS_DECL(equivalent) - PHMAP_COMPARE_INLINE_BASECLASS_DECL(nonequivalent) - }; - - template - struct partial_ordering_base { - PHMAP_COMPARE_INLINE_BASECLASS_DECL(less) - PHMAP_COMPARE_INLINE_BASECLASS_DECL(equivalent) - PHMAP_COMPARE_INLINE_BASECLASS_DECL(greater) - PHMAP_COMPARE_INLINE_BASECLASS_DECL(unordered) - }; - - template - struct weak_ordering_base { - PHMAP_COMPARE_INLINE_BASECLASS_DECL(less) - PHMAP_COMPARE_INLINE_BASECLASS_DECL(equivalent) - PHMAP_COMPARE_INLINE_BASECLASS_DECL(greater) - }; - - template - struct strong_ordering_base { - PHMAP_COMPARE_INLINE_BASECLASS_DECL(less) - PHMAP_COMPARE_INLINE_BASECLASS_DECL(equal) - PHMAP_COMPARE_INLINE_BASECLASS_DECL(equivalent) - PHMAP_COMPARE_INLINE_BASECLASS_DECL(greater) - }; - - } // namespace compare_internal - - class weak_equality - : public compare_internal::weak_equality_base { - explicit constexpr weak_equality(compare_internal::eq v) noexcept - : value_(static_cast(v)) {} - friend struct compare_internal::weak_equality_base; - - public: - PHMAP_COMPARE_INLINE_SUBCLASS_DECL(weak_equality, equivalent) - PHMAP_COMPARE_INLINE_SUBCLASS_DECL(weak_equality, nonequivalent) - - // Comparisons - friend constexpr bool operator==( - weak_equality v, compare_internal::OnlyLiteralZero<>) noexcept { - return v.value_ == 0; - } - friend constexpr bool operator!=( - weak_equality v, compare_internal::OnlyLiteralZero<>) noexcept { - return v.value_ != 0; - } - friend constexpr bool operator==(compare_internal::OnlyLiteralZero<>, - weak_equality v) noexcept { - return 0 == v.value_; - } - friend constexpr bool operator!=(compare_internal::OnlyLiteralZero<>, - weak_equality v) noexcept { - return 0 != v.value_; - } - - private: - compare_internal::value_type value_; - }; - PHMAP_COMPARE_INLINE_INIT(weak_equality, equivalent, - compare_internal::eq::equivalent); - PHMAP_COMPARE_INLINE_INIT(weak_equality, nonequivalent, - compare_internal::eq::nonequivalent); - - class strong_equality - : public compare_internal::strong_equality_base { - explicit constexpr strong_equality(compare_internal::eq v) noexcept - : value_(static_cast(v)) {} - friend struct compare_internal::strong_equality_base; - - public: - PHMAP_COMPARE_INLINE_SUBCLASS_DECL(strong_equality, equal) - PHMAP_COMPARE_INLINE_SUBCLASS_DECL(strong_equality, nonequal) - PHMAP_COMPARE_INLINE_SUBCLASS_DECL(strong_equality, equivalent) - PHMAP_COMPARE_INLINE_SUBCLASS_DECL(strong_equality, nonequivalent) - - // Conversion - constexpr operator weak_equality() const noexcept { // NOLINT - return value_ == 0 ? 
weak_equality::equivalent - : weak_equality::nonequivalent; - } - // Comparisons - friend constexpr bool operator==( - strong_equality v, compare_internal::OnlyLiteralZero<>) noexcept { - return v.value_ == 0; - } - friend constexpr bool operator!=( - strong_equality v, compare_internal::OnlyLiteralZero<>) noexcept { - return v.value_ != 0; - } - friend constexpr bool operator==(compare_internal::OnlyLiteralZero<>, - strong_equality v) noexcept { - return 0 == v.value_; - } - friend constexpr bool operator!=(compare_internal::OnlyLiteralZero<>, - strong_equality v) noexcept { - return 0 != v.value_; - } - - private: - compare_internal::value_type value_; - }; - - PHMAP_COMPARE_INLINE_INIT(strong_equality, equal, compare_internal::eq::equal); - PHMAP_COMPARE_INLINE_INIT(strong_equality, nonequal, - compare_internal::eq::nonequal); - PHMAP_COMPARE_INLINE_INIT(strong_equality, equivalent, - compare_internal::eq::equivalent); - PHMAP_COMPARE_INLINE_INIT(strong_equality, nonequivalent, - compare_internal::eq::nonequivalent); - - class partial_ordering - : public compare_internal::partial_ordering_base { - explicit constexpr partial_ordering(compare_internal::eq v) noexcept - : value_(static_cast(v)) {} - explicit constexpr partial_ordering(compare_internal::ord v) noexcept - : value_(static_cast(v)) {} - explicit constexpr partial_ordering(compare_internal::ncmp v) noexcept - : value_(static_cast(v)) {} - friend struct compare_internal::partial_ordering_base; - - constexpr bool is_ordered() const noexcept { - return value_ != - compare_internal::value_type(compare_internal::ncmp::unordered); - } - - public: - PHMAP_COMPARE_INLINE_SUBCLASS_DECL(partial_ordering, less) - PHMAP_COMPARE_INLINE_SUBCLASS_DECL(partial_ordering, equivalent) - PHMAP_COMPARE_INLINE_SUBCLASS_DECL(partial_ordering, greater) - PHMAP_COMPARE_INLINE_SUBCLASS_DECL(partial_ordering, unordered) - - // Conversion - constexpr operator weak_equality() const noexcept { // NOLINT - return value_ == 0 ? 
weak_equality::equivalent - : weak_equality::nonequivalent; - } - // Comparisons - friend constexpr bool operator==( - partial_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { - return v.is_ordered() && v.value_ == 0; - } - friend constexpr bool operator!=( - partial_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { - return !v.is_ordered() || v.value_ != 0; - } - friend constexpr bool operator<( - partial_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { - return v.is_ordered() && v.value_ < 0; - } - friend constexpr bool operator<=( - partial_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { - return v.is_ordered() && v.value_ <= 0; - } - friend constexpr bool operator>( - partial_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { - return v.is_ordered() && v.value_ > 0; - } - friend constexpr bool operator>=( - partial_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { - return v.is_ordered() && v.value_ >= 0; - } - friend constexpr bool operator==(compare_internal::OnlyLiteralZero<>, - partial_ordering v) noexcept { - return v.is_ordered() && 0 == v.value_; - } - friend constexpr bool operator!=(compare_internal::OnlyLiteralZero<>, - partial_ordering v) noexcept { - return !v.is_ordered() || 0 != v.value_; - } - friend constexpr bool operator<(compare_internal::OnlyLiteralZero<>, - partial_ordering v) noexcept { - return v.is_ordered() && 0 < v.value_; - } - friend constexpr bool operator<=(compare_internal::OnlyLiteralZero<>, - partial_ordering v) noexcept { - return v.is_ordered() && 0 <= v.value_; - } - friend constexpr bool operator>(compare_internal::OnlyLiteralZero<>, - partial_ordering v) noexcept { - return v.is_ordered() && 0 > v.value_; - } - friend constexpr bool operator>=(compare_internal::OnlyLiteralZero<>, - partial_ordering v) noexcept { - return v.is_ordered() && 0 >= v.value_; - } - - private: - compare_internal::value_type value_; - }; - - PHMAP_COMPARE_INLINE_INIT(partial_ordering, less, compare_internal::ord::less); - PHMAP_COMPARE_INLINE_INIT(partial_ordering, equivalent, - compare_internal::eq::equivalent); - PHMAP_COMPARE_INLINE_INIT(partial_ordering, greater, - compare_internal::ord::greater); - PHMAP_COMPARE_INLINE_INIT(partial_ordering, unordered, - compare_internal::ncmp::unordered); - - class weak_ordering - : public compare_internal::weak_ordering_base { - explicit constexpr weak_ordering(compare_internal::eq v) noexcept - : value_(static_cast(v)) {} - explicit constexpr weak_ordering(compare_internal::ord v) noexcept - : value_(static_cast(v)) {} - friend struct compare_internal::weak_ordering_base; - - public: - PHMAP_COMPARE_INLINE_SUBCLASS_DECL(weak_ordering, less) - PHMAP_COMPARE_INLINE_SUBCLASS_DECL(weak_ordering, equivalent) - PHMAP_COMPARE_INLINE_SUBCLASS_DECL(weak_ordering, greater) - - // Conversions - constexpr operator weak_equality() const noexcept { // NOLINT - return value_ == 0 ? weak_equality::equivalent - : weak_equality::nonequivalent; - } - constexpr operator partial_ordering() const noexcept { // NOLINT - return value_ == 0 ? partial_ordering::equivalent - : (value_ < 0 ? 
partial_ordering::less - : partial_ordering::greater); - } - // Comparisons - friend constexpr bool operator==( - weak_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { - return v.value_ == 0; - } - friend constexpr bool operator!=( - weak_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { - return v.value_ != 0; - } - friend constexpr bool operator<( - weak_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { - return v.value_ < 0; - } - friend constexpr bool operator<=( - weak_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { - return v.value_ <= 0; - } - friend constexpr bool operator>( - weak_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { - return v.value_ > 0; - } - friend constexpr bool operator>=( - weak_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { - return v.value_ >= 0; - } - friend constexpr bool operator==(compare_internal::OnlyLiteralZero<>, - weak_ordering v) noexcept { - return 0 == v.value_; - } - friend constexpr bool operator!=(compare_internal::OnlyLiteralZero<>, - weak_ordering v) noexcept { - return 0 != v.value_; - } - friend constexpr bool operator<(compare_internal::OnlyLiteralZero<>, - weak_ordering v) noexcept { - return 0 < v.value_; - } - friend constexpr bool operator<=(compare_internal::OnlyLiteralZero<>, - weak_ordering v) noexcept { - return 0 <= v.value_; - } - friend constexpr bool operator>(compare_internal::OnlyLiteralZero<>, - weak_ordering v) noexcept { - return 0 > v.value_; - } - friend constexpr bool operator>=(compare_internal::OnlyLiteralZero<>, - weak_ordering v) noexcept { - return 0 >= v.value_; - } - - private: - compare_internal::value_type value_; - }; - - PHMAP_COMPARE_INLINE_INIT(weak_ordering, less, compare_internal::ord::less); - PHMAP_COMPARE_INLINE_INIT(weak_ordering, equivalent, - compare_internal::eq::equivalent); - PHMAP_COMPARE_INLINE_INIT(weak_ordering, greater, - compare_internal::ord::greater); - - class strong_ordering - : public compare_internal::strong_ordering_base { - explicit constexpr strong_ordering(compare_internal::eq v) noexcept - : value_(static_cast(v)) {} - explicit constexpr strong_ordering(compare_internal::ord v) noexcept - : value_(static_cast(v)) {} - friend struct compare_internal::strong_ordering_base; - - public: - PHMAP_COMPARE_INLINE_SUBCLASS_DECL(strong_ordering, less) - PHMAP_COMPARE_INLINE_SUBCLASS_DECL(strong_ordering, equal) - PHMAP_COMPARE_INLINE_SUBCLASS_DECL(strong_ordering, equivalent) - PHMAP_COMPARE_INLINE_SUBCLASS_DECL(strong_ordering, greater) - - // Conversions - constexpr operator weak_equality() const noexcept { // NOLINT - return value_ == 0 ? weak_equality::equivalent - : weak_equality::nonequivalent; - } - constexpr operator strong_equality() const noexcept { // NOLINT - return value_ == 0 ? strong_equality::equal : strong_equality::nonequal; - } - constexpr operator partial_ordering() const noexcept { // NOLINT - return value_ == 0 ? partial_ordering::equivalent - : (value_ < 0 ? partial_ordering::less - : partial_ordering::greater); - } - constexpr operator weak_ordering() const noexcept { // NOLINT - return value_ == 0 - ? weak_ordering::equivalent - : (value_ < 0 ? 
weak_ordering::less : weak_ordering::greater); - } - // Comparisons - friend constexpr bool operator==( - strong_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { - return v.value_ == 0; - } - friend constexpr bool operator!=( - strong_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { - return v.value_ != 0; - } - friend constexpr bool operator<( - strong_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { - return v.value_ < 0; - } - friend constexpr bool operator<=( - strong_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { - return v.value_ <= 0; - } - friend constexpr bool operator>( - strong_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { - return v.value_ > 0; - } - friend constexpr bool operator>=( - strong_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { - return v.value_ >= 0; - } - friend constexpr bool operator==(compare_internal::OnlyLiteralZero<>, - strong_ordering v) noexcept { - return 0 == v.value_; - } - friend constexpr bool operator!=(compare_internal::OnlyLiteralZero<>, - strong_ordering v) noexcept { - return 0 != v.value_; - } - friend constexpr bool operator<(compare_internal::OnlyLiteralZero<>, - strong_ordering v) noexcept { - return 0 < v.value_; - } - friend constexpr bool operator<=(compare_internal::OnlyLiteralZero<>, - strong_ordering v) noexcept { - return 0 <= v.value_; - } - friend constexpr bool operator>(compare_internal::OnlyLiteralZero<>, - strong_ordering v) noexcept { - return 0 > v.value_; - } - friend constexpr bool operator>=(compare_internal::OnlyLiteralZero<>, - strong_ordering v) noexcept { - return 0 >= v.value_; - } - - private: - compare_internal::value_type value_; - }; - PHMAP_COMPARE_INLINE_INIT(strong_ordering, less, compare_internal::ord::less); - PHMAP_COMPARE_INLINE_INIT(strong_ordering, equal, compare_internal::eq::equal); - PHMAP_COMPARE_INLINE_INIT(strong_ordering, equivalent, - compare_internal::eq::equivalent); - PHMAP_COMPARE_INLINE_INIT(strong_ordering, greater, - compare_internal::ord::greater); - -#undef PHMAP_COMPARE_INLINE_BASECLASS_DECL -#undef PHMAP_COMPARE_INLINE_SUBCLASS_DECL -#undef PHMAP_COMPARE_INLINE_INIT - - namespace compare_internal { - // We also provide these comparator adapter functions for internal phmap use. - - // Helper functions to do a boolean comparison of two keys given a boolean - // or three-way comparator. - // SFINAE prevents implicit conversions to bool (such as from int). - template ::value, int> = 0> - constexpr bool compare_result_as_less_than(const BoolType r) { return r; } - constexpr bool compare_result_as_less_than(const phmap::weak_ordering r) { - return r < 0; - } - - template - constexpr bool do_less_than_comparison(const Compare &compare, const K &x, - const LK &y) { - return compare_result_as_less_than(compare(x, y)); - } - - // Helper functions to do a three-way comparison of two keys given a boolean or - // three-way comparator. - // SFINAE prevents implicit conversions to int (such as from bool). - template ::value, int> = 0> - constexpr phmap::weak_ordering compare_result_as_ordering(const Int c) { - return c < 0 ? phmap::weak_ordering::less - : c == 0 ? 
phmap::weak_ordering::equivalent - : phmap::weak_ordering::greater; - } - constexpr phmap::weak_ordering compare_result_as_ordering( - const phmap::weak_ordering c) { - return c; - } - - template < - typename Compare, typename K, typename LK, - phmap::enable_if_t>::value, - int> = 0> - constexpr phmap::weak_ordering do_three_way_comparison(const Compare &compare, - const K &x, const LK &y) { - return compare_result_as_ordering(compare(x, y)); - } - template < - typename Compare, typename K, typename LK, - phmap::enable_if_t>::value, - int> = 0> - constexpr phmap::weak_ordering do_three_way_comparison(const Compare &compare, - const K &x, const LK &y) { - return compare(x, y) ? phmap::weak_ordering::less - : compare(y, x) ? phmap::weak_ordering::greater - : phmap::weak_ordering::equivalent; - } - - } // namespace compare_internal -} - - -namespace phmap { - -namespace priv { - - // A helper class that indicates if the Compare parameter is a key-compare-to - // comparator. - template - using btree_is_key_compare_to = - std::is_convertible, - phmap::weak_ordering>; - - struct StringBtreeDefaultLess { - using is_transparent = void; - - StringBtreeDefaultLess() = default; - - // Compatibility constructor. - StringBtreeDefaultLess(std::less) {} // NOLINT -#if PHMAP_HAVE_STD_STRING_VIEW - StringBtreeDefaultLess(std::less) {} // NOLINT - StringBtreeDefaultLess(phmap::Less) {} // NOLINT - - phmap::weak_ordering operator()(std::string_view lhs, - std::string_view rhs) const { - return compare_internal::compare_result_as_ordering(lhs.compare(rhs)); - } -#else - phmap::weak_ordering operator()(std::string lhs, - std::string rhs) const { - return compare_internal::compare_result_as_ordering(lhs.compare(rhs)); - } -#endif - }; - - struct StringBtreeDefaultGreater { - using is_transparent = void; - - StringBtreeDefaultGreater() = default; - - StringBtreeDefaultGreater(std::greater) {} // NOLINT -#if PHMAP_HAVE_STD_STRING_VIEW - StringBtreeDefaultGreater(std::greater) {} // NOLINT - - phmap::weak_ordering operator()(std::string_view lhs, - std::string_view rhs) const { - return compare_internal::compare_result_as_ordering(rhs.compare(lhs)); - } -#else - phmap::weak_ordering operator()(std::string lhs, - std::string rhs) const { - return compare_internal::compare_result_as_ordering(rhs.compare(lhs)); - } -#endif - }; - - // A helper class to convert a boolean comparison into a three-way "compare-to" - // comparison that returns a negative value to indicate less-than, zero to - // indicate equality and a positive value to indicate greater-than. This helper - // class is specialized for less, greater, - // less, and greater. - // - // key_compare_to_adapter is provided so that btree users - // automatically get the more efficient compare-to code when using common - // google string types with common comparison functors. - // These string-like specializations also turn on heterogeneous lookup by - // default. 
- template - struct key_compare_to_adapter { - using type = Compare; - }; - - template <> - struct key_compare_to_adapter> { - using type = StringBtreeDefaultLess; - }; - - template <> - struct key_compare_to_adapter> { - using type = StringBtreeDefaultLess; - }; - - template <> - struct key_compare_to_adapter> { - using type = StringBtreeDefaultGreater; - }; - -#if PHMAP_HAVE_STD_STRING_VIEW - template <> - struct key_compare_to_adapter> { - using type = StringBtreeDefaultLess; - }; - - template <> - struct key_compare_to_adapter> { - using type = StringBtreeDefaultLess; - }; - - template <> - struct key_compare_to_adapter> { - using type = StringBtreeDefaultGreater; - }; -#endif - - template - struct common_params { - // If Compare is a common comparator for a std::string-like type, then we adapt it - // to use heterogeneous lookup and to be a key-compare-to comparator. - using key_compare = typename key_compare_to_adapter::type; - // A type which indicates if we have a key-compare-to functor or a plain old - // key-compare functor. - using is_key_compare_to = btree_is_key_compare_to; - - using allocator_type = Alloc; - using key_type = Key; - using size_type = std::size_t ; - using difference_type = ptrdiff_t; - - // True if this is a multiset or multimap. - using is_multi_container = std::integral_constant; - - using slot_policy = SlotPolicy; - using slot_type = typename slot_policy::slot_type; - using value_type = typename slot_policy::value_type; - using init_type = typename slot_policy::mutable_value_type; - using pointer = value_type *; - using const_pointer = const value_type *; - using reference = value_type &; - using const_reference = const value_type &; - - enum { - kTargetNodeSize = TargetNodeSize, - - // Upper bound for the available space for values. This is largest for leaf - // nodes, which have overhead of at least a pointer + 4 bytes (for storing - // 3 field_types and an enum). - kNodeSlotSpace = - TargetNodeSize - /*minimum overhead=*/(sizeof(void *) + 4), - }; - - // This is an integral type large enough to hold as many - // ValueSize-values as will fit a node of TargetNodeSize bytes. - using node_count_type = - phmap::conditional_t<(kNodeSlotSpace / sizeof(slot_type) > - (std::numeric_limits::max)()), - uint16_t, uint8_t>; // NOLINT - - // The following methods are necessary for passing this struct as PolicyTraits - // for node_handle and/or are used within btree. - static value_type &element(slot_type *slot) { - return slot_policy::element(slot); - } - static const value_type &element(const slot_type *slot) { - return slot_policy::element(slot); - } - template - static void construct(Alloc *alloc, slot_type *slot, Args &&... 
args) { - slot_policy::construct(alloc, slot, std::forward(args)...); - } - static void construct(Alloc *alloc, slot_type *slot, slot_type *other) { - slot_policy::construct(alloc, slot, other); - } - static void destroy(Alloc *alloc, slot_type *slot) { - slot_policy::destroy(alloc, slot); - } - static void transfer(Alloc *alloc, slot_type *new_slot, slot_type *old_slot) { - construct(alloc, new_slot, old_slot); - destroy(alloc, old_slot); - } - static void swap(Alloc *alloc, slot_type *a, slot_type *b) { - slot_policy::swap(alloc, a, b); - } - static void move(Alloc *alloc, slot_type *src, slot_type *dest) { - slot_policy::move(alloc, src, dest); - } - static void move(Alloc *alloc, slot_type *first, slot_type *last, - slot_type *result) { - slot_policy::move(alloc, first, last, result); - } - }; - - // A parameters structure for holding the type parameters for a btree_map. - // Compare and Alloc should be nothrow copy-constructible. - template - struct map_params : common_params> { - using super_type = typename map_params::common_params; - using mapped_type = Data; - // This type allows us to move keys when it is safe to do so. It is safe - // for maps in which value_type and mutable_value_type are layout compatible. - using slot_policy = typename super_type::slot_policy; - using slot_type = typename super_type::slot_type; - using value_type = typename super_type::value_type; - using init_type = typename super_type::init_type; - - using key_compare = typename super_type::key_compare; - // Inherit from key_compare for empty base class optimization. - struct value_compare : private key_compare { - value_compare() = default; - explicit value_compare(const key_compare &cmp) : key_compare(cmp) {} - - template - auto operator()(const T &left, const U &right) const - -> decltype(std::declval()(left.first, right.first)) { - return key_compare::operator()(left.first, right.first); - } - }; - using is_map_container = std::true_type; - - static const Key &key(const value_type &x) { return x.first; } - static const Key &key(const init_type &x) { return x.first; } - static const Key &key(const slot_type *x) { return slot_policy::key(x); } - static mapped_type &value(value_type *value) { return value->second; } - }; - - // This type implements the necessary functions from the - // btree::priv::slot_type interface. - template - struct set_slot_policy { - using slot_type = Key; - using value_type = Key; - using mutable_value_type = Key; - - static value_type &element(slot_type *slot) { return *slot; } - static const value_type &element(const slot_type *slot) { return *slot; } - - template - static void construct(Alloc *alloc, slot_type *slot, Args &&... 
args) { - phmap::allocator_traits::construct(*alloc, slot, - std::forward(args)...); - } - - template - static void construct(Alloc *alloc, slot_type *slot, slot_type *other) { - phmap::allocator_traits::construct(*alloc, slot, std::move(*other)); - } - - template - static void destroy(Alloc *alloc, slot_type *slot) { - phmap::allocator_traits::destroy(*alloc, slot); - } - - template - static void swap(Alloc * /*alloc*/, slot_type *a, slot_type *b) { - using std::swap; - swap(*a, *b); - } - - template - static void move(Alloc * /*alloc*/, slot_type *src, slot_type *dest) { - *dest = std::move(*src); - } - - template - static void move(Alloc *alloc, slot_type *first, slot_type *last, - slot_type *result) { - for (slot_type *src = first, *dest = result; src != last; ++src, ++dest) - move(alloc, src, dest); - } - }; - - // A parameters structure for holding the type parameters for a btree_set. - // Compare and Alloc should be nothrow copy-constructible. - template - struct set_params : common_params> { - using value_type = Key; - using slot_type = typename set_params::common_params::slot_type; - using value_compare = typename set_params::common_params::key_compare; - using is_map_container = std::false_type; - - static const Key &key(const value_type &x) { return x; } - static const Key &key(const slot_type *x) { return *x; } - }; - - // An adapter class that converts a lower-bound compare into an upper-bound - // compare. Note: there is no need to make a version of this adapter specialized - // for key-compare-to functors because the upper-bound (the first value greater - // than the input) is never an exact match. - template - struct upper_bound_adapter { - explicit upper_bound_adapter(const Compare &c) : comp(c) {} - template - bool operator()(const K &a, const LK &b) const { - // Returns true when a is not greater than b. - return !phmap::compare_internal::compare_result_as_less_than(comp(b, a)); - } - - private: - Compare comp; - }; - - enum class MatchKind : uint8_t { kEq, kNe }; - - template - struct SearchResult { - V value; - MatchKind match; - - static constexpr bool HasMatch() { return true; } - bool IsEq() const { return match == MatchKind::kEq; } - }; - - // When we don't use CompareTo, `match` is not present. - // This ensures that callers can't use it accidentally when it provides no - // useful information. - template - struct SearchResult { - V value; - - static constexpr bool HasMatch() { return false; } - static constexpr bool IsEq() { return false; } - }; - - // A node in the btree holding. The same node type is used for both internal - // and leaf nodes in the btree, though the nodes are allocated in such a way - // that the children array is only valid in internal nodes. 
- template - class btree_node { - using is_key_compare_to = typename Params::is_key_compare_to; - using is_multi_container = typename Params::is_multi_container; - using field_type = typename Params::node_count_type; - using allocator_type = typename Params::allocator_type; - using slot_type = typename Params::slot_type; - - public: - using params_type = Params; - using key_type = typename Params::key_type; - using value_type = typename Params::value_type; - using pointer = typename Params::pointer; - using const_pointer = typename Params::const_pointer; - using reference = typename Params::reference; - using const_reference = typename Params::const_reference; - using key_compare = typename Params::key_compare; - using size_type = typename Params::size_type; - using difference_type = typename Params::difference_type; - - // Btree decides whether to use linear node search as follows: - // - If the key is arithmetic and the comparator is std::less or - // std::greater, choose linear. - // - Otherwise, choose binary. - // TODO(ezb): Might make sense to add condition(s) based on node-size. - using use_linear_search = std::integral_constant< - bool, - std::is_arithmetic::value && - (std::is_same, key_compare>::value || - std::is_same, key_compare>::value || - std::is_same, key_compare>::value)>; - - - ~btree_node() = default; - btree_node(btree_node const &) = delete; - btree_node &operator=(btree_node const &) = delete; - - // Public for EmptyNodeType. - constexpr static size_type Alignment() { - static_assert(LeafLayout(1).Alignment() == InternalLayout().Alignment(), - "Alignment of all nodes must be equal."); - return (size_type)InternalLayout().Alignment(); - } - - protected: - btree_node() = default; - - private: - using layout_type = phmap::priv::Layout; - constexpr static size_type SizeWithNValues(size_type n) { - return (size_type)layout_type(/*parent*/ 1, - /*position, start, count, max_count*/ 4, - /*values*/ (size_t)n, - /*children*/ 0) - .AllocSize(); - } - // A lower bound for the overhead of fields other than values in a leaf node. - constexpr static size_type MinimumOverhead() { - return (size_type)(SizeWithNValues(1) - sizeof(value_type)); - } - - // Compute how many values we can fit onto a leaf node taking into account - // padding. - constexpr static size_type NodeTargetValues(const int begin, const int end) { - return begin == end ? begin - : SizeWithNValues((begin + end) / 2 + 1) > - params_type::kTargetNodeSize - ? NodeTargetValues(begin, (begin + end) / 2) - : NodeTargetValues((begin + end) / 2 + 1, end); - } - - enum { - kTargetNodeSize = params_type::kTargetNodeSize, - kNodeTargetValues = NodeTargetValues(0, params_type::kTargetNodeSize), - - // We need a minimum of 3 values per internal node in order to perform - // splitting (1 value for the two nodes involved in the split and 1 value - // propagated to the parent as the delimiter for the split). - kNodeValues = kNodeTargetValues >= 3 ? kNodeTargetValues : 3, - - // The node is internal (i.e. is not a leaf node) if and only if `max_count` - // has this value. - kInternalNodeMaxCount = 0, - }; - - // Leaves can have less than kNodeValues values. 
- constexpr static layout_type LeafLayout(const int max_values = kNodeValues) { - return layout_type(/*parent*/ 1, - /*position, start, count, max_count*/ 4, - /*values*/ (size_t)max_values, - /*children*/ 0); - } - constexpr static layout_type InternalLayout() { - return layout_type(/*parent*/ 1, - /*position, start, count, max_count*/ 4, - /*values*/ kNodeValues, - /*children*/ kNodeValues + 1); - } - constexpr static size_type LeafSize(const int max_values = kNodeValues) { - return (size_type)LeafLayout(max_values).AllocSize(); - } - constexpr static size_type InternalSize() { - return (size_type)InternalLayout().AllocSize(); - } - - // N is the index of the type in the Layout definition. - // ElementType is the Nth type in the Layout definition. - template - inline typename layout_type::template ElementType *GetField() { - // We assert that we don't read from values that aren't there. - assert(N < 3 || !leaf()); - return InternalLayout().template Pointer(reinterpret_cast(this)); - } - - template - inline const typename layout_type::template ElementType *GetField() const { - assert(N < 3 || !leaf()); - return InternalLayout().template Pointer( - reinterpret_cast(this)); - } - - void set_parent(btree_node *p) { *GetField<0>() = p; } - field_type &mutable_count() { return GetField<1>()[2]; } - slot_type *slot(size_type i) { return &GetField<2>()[i]; } - const slot_type *slot(size_type i) const { return &GetField<2>()[i]; } - void set_position(field_type v) { GetField<1>()[0] = v; } - void set_start(field_type v) { GetField<1>()[1] = v; } - void set_count(field_type v) { GetField<1>()[2] = v; } - void set_max_count(field_type v) { GetField<1>()[3] = v; } - - public: - // Whether this is a leaf node or not. This value doesn't change after the - // node is created. - bool leaf() const { return GetField<1>()[3] != kInternalNodeMaxCount; } - - // Getter for the position of this node in its parent. - field_type position() const { return GetField<1>()[0]; } - - // Getter for the offset of the first value in the `values` array. - field_type start() const { return GetField<1>()[1]; } - - // Getters for the number of values stored in this node. - field_type count() const { return GetField<1>()[2]; } - field_type max_count() const { - // Internal nodes have max_count==kInternalNodeMaxCount. - // Leaf nodes have max_count in [1, kNodeValues]. - const field_type max_cnt = GetField<1>()[3]; - return max_cnt == field_type{kInternalNodeMaxCount} - ? field_type{kNodeValues} - : max_cnt; - } - - // Getter for the parent of this node. - btree_node *parent() const { return *GetField<0>(); } - // Getter for whether the node is the root of the tree. The parent of the - // root of the tree is the leftmost node in the tree which is guaranteed to - // be a leaf. - bool is_root() const { return parent()->leaf(); } - void make_root() { - assert(parent()->is_root()); - set_parent(parent()->parent()); - } - - // Getters for the key/value at position i in the node. - const key_type &key(size_type i) const { return params_type::key(slot(i)); } - reference value(size_type i) { return params_type::element(slot(i)); } - const_reference value(size_type i) const { return params_type::element(slot(i)); } - - // Getters/setter for the child at position i in the node. 
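A small model of the `max_count` sentinel used by `leaf()` and `max_count()` above: internal nodes store `kInternalNodeMaxCount` (0) in the field, so the node kind is encoded in existing metadata rather than a separate flag, while undersized leaf roots keep their true capacity. Field values below are illustrative, not phmap's real layout:

```
#include <cassert>
#include <cstdint>

using field_type = std::uint8_t;
constexpr field_type kInternalNodeMaxCount = 0;
constexpr field_type kNodeValues = 61;  // model capacity of a full leaf

struct NodeHeader {
    field_type position, start, count, max_count;
    bool leaf() const { return max_count != kInternalNodeMaxCount; }
    field_type capacity() const {
        // Internal nodes report the fixed kNodeValues capacity.
        return max_count == kInternalNodeMaxCount ? kNodeValues : max_count;
    }
};

int main() {
    NodeHeader leaf_node{0, 0, 5, kNodeValues};
    NodeHeader small_root{0, 0, 1, 1};  // undersized single-leaf root
    NodeHeader internal{0, 0, 10, kInternalNodeMaxCount};
    assert(leaf_node.leaf() && small_root.leaf() && !internal.leaf());
    assert(internal.capacity() == kNodeValues);
    assert(small_root.capacity() == 1);
}
```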
- btree_node *child(size_type i) const { return GetField<3>()[i]; } - btree_node *&mutable_child(size_type i) { return GetField<3>()[i]; } - void clear_child(size_type i) { - phmap::priv::SanitizerPoisonObject(&mutable_child(i)); - } - void set_child(size_type i, btree_node *c) { - phmap::priv::SanitizerUnpoisonObject(&mutable_child(i)); - mutable_child(i) = c; - c->set_position((field_type)i); - } - void init_child(int i, btree_node *c) { - set_child(i, c); - c->set_parent(this); - } - - // Returns the position of the first value whose key is not less than k. - template - SearchResult lower_bound( - const K &k, const key_compare &comp) const { - return use_linear_search::value ? linear_search(k, comp) - : binary_search(k, comp); - } - // Returns the position of the first value whose key is greater than k. - template - int upper_bound(const K &k, const key_compare &comp) const { - auto upper_compare = upper_bound_adapter(comp); - return use_linear_search::value ? linear_search(k, upper_compare).value - : binary_search(k, upper_compare).value; - } - - template - SearchResult::value> - linear_search(const K &k, const Compare &comp) const { - return linear_search_impl(k, 0, count(), comp, - btree_is_key_compare_to()); - } - - template - SearchResult::value> - binary_search(const K &k, const Compare &comp) const { - return binary_search_impl(k, 0, count(), comp, - btree_is_key_compare_to()); - } - - // Returns the position of the first value whose key is not less than k using - // linear search performed using plain compare. - template - SearchResult linear_search_impl( - const K &k, int s, const int e, const Compare &comp, - std::false_type /* IsCompareTo */) const { - while (s < e) { - if (!comp(key(s), k)) { - break; - } - ++s; - } - return {s}; - } - - // Returns the position of the first value whose key is not less than k using - // linear search performed using compare-to. - template - SearchResult linear_search_impl( - const K &k, int s, const int e, const Compare &comp, - std::true_type /* IsCompareTo */) const { - while (s < e) { - const phmap::weak_ordering c = comp(key(s), k); - if (c == 0) { - return {s, MatchKind::kEq}; - } else if (c > 0) { - break; - } - ++s; - } - return {s, MatchKind::kNe}; - } - - // Returns the position of the first value whose key is not less than k using - // binary search performed using plain compare. - template - SearchResult binary_search_impl( - const K &k, int s, int e, const Compare &comp, - std::false_type /* IsCompareTo */) const { - while (s != e) { - const int mid = (s + e) >> 1; - if (comp(key(mid), k)) { - s = mid + 1; - } else { - e = mid; - } - } - return {s}; - } - - // Returns the position of the first value whose key is not less than k using - // binary search performed using compare-to. - template - SearchResult binary_search_impl( - const K &k, int s, int e, const CompareTo &comp, - std::true_type /* IsCompareTo */) const { - if (is_multi_container::value) { - MatchKind exact_match = MatchKind::kNe; - while (s != e) { - const int mid = (s + e) >> 1; - const phmap::weak_ordering c = comp(key(mid), k); - if (c < 0) { - s = mid + 1; - } else { - e = mid; - if (c == 0) { - // Need to return the first value whose key is not less than k, - // which requires continuing the binary search if this is a - // multi-container. - exact_match = MatchKind::kEq; - } - } - } - return {s, exact_match}; - } else { // Not a multi-container. 
- while (s != e) { - const int mid = (s + e) >> 1; - const phmap::weak_ordering c = comp(key(mid), k); - if (c < 0) { - s = mid + 1; - } else if (c > 0) { - e = mid; - } else { - return {mid, MatchKind::kEq}; - } - } - return {s, MatchKind::kNe}; - } - } - - // Emplaces a value at position i, shifting all existing values and - // children at positions >= i to the right by 1. - template - void emplace_value(size_type i, allocator_type *alloc, Args &&... args); - - // Removes the value at position i, shifting all existing values and children - // at positions > i to the left by 1. - void remove_value(int i, allocator_type *alloc); - - // Removes the values at positions [i, i + to_erase), shifting all values - // after that range to the left by to_erase. Does not change children at all. - void remove_values_ignore_children(int i, size_type to_erase, - allocator_type *alloc); - - // Rebalances a node with its right sibling. - void rebalance_right_to_left(int to_move, btree_node *right, - allocator_type *alloc); - void rebalance_left_to_right(int to_move, btree_node *right, - allocator_type *alloc); - - // Splits a node, moving a portion of the node's values to its right sibling. - void split(int insert_position, btree_node *dest, allocator_type *alloc); - - // Merges a node with its right sibling, moving all of the values and the - // delimiting key in the parent node onto itself. - void merge(btree_node *sibling, allocator_type *alloc); - - // Swap the contents of "this" and "src". - void swap(btree_node *src, allocator_type *alloc); - - // Node allocation/deletion routines. - static btree_node *init_leaf(btree_node *n, btree_node *parent, - int max_cnt) { - n->set_parent(parent); - n->set_position(0); - n->set_start(0); - n->set_count(0); - n->set_max_count((field_type)max_cnt); - phmap::priv::SanitizerPoisonMemoryRegion( - n->slot(0), max_cnt * sizeof(slot_type)); - return n; - } - static btree_node *init_internal(btree_node *n, btree_node *parent) { - init_leaf(n, parent, kNodeValues); - // Set `max_count` to a sentinel value to indicate that this node is - // internal. - n->set_max_count(kInternalNodeMaxCount); - phmap::priv::SanitizerPoisonMemoryRegion( - &n->mutable_child(0), (kNodeValues + 1) * sizeof(btree_node *)); - return n; - } - void destroy(allocator_type *alloc) { - for (int i = 0; i < count(); ++i) { - value_destroy(i, alloc); - } - } - - public: - // Exposed only for tests. - static bool testonly_uses_linear_node_search() { - return use_linear_search::value; - } - - private: - template - void value_init(const size_type i, allocator_type *alloc, Args &&... args) { - phmap::priv::SanitizerUnpoisonObject(slot(i)); - params_type::construct(alloc, slot(i), std::forward(args)...); - } - void value_destroy(const size_type i, allocator_type *alloc) { - params_type::destroy(alloc, slot(i)); - phmap::priv::SanitizerPoisonObject(slot(i)); - } - - // Move n values starting at value i in this node into the values starting at - // value j in node x. - void uninitialized_move_n(const size_type n, const size_type i, - const size_type j, btree_node *x, - allocator_type *alloc) { - phmap::priv::SanitizerUnpoisonMemoryRegion( - x->slot(j), n * sizeof(slot_type)); - for (slot_type *src = slot(i), *end = src + n, *dest = x->slot(j); - src != end; ++src, ++dest) { - params_type::construct(alloc, dest, src); - } - } - - // Destroys a range of n values, starting at index i. 
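The compare-to search above is worth isolating: with a three-way comparator, a unique container can stop at the first exact match, while a multi-container must keep bisecting left so it lands on the *first* equal key. A self-contained sketch over a sorted vector (a stand-in for a node's value array; the function name is ours):

```
#include <cassert>
#include <vector>

enum class MatchKind { kEq, kNe };
struct SearchResult { int value; MatchKind match; };

// Model of binary_search_impl for a compare-to functor (<0 / 0 / >0).
template <bool IsMulti>
SearchResult BinarySearchCompareTo(const std::vector<int> &keys, int k) {
    int s = 0, e = static_cast<int>(keys.size());
    MatchKind exact = MatchKind::kNe;
    while (s != e) {
        const int mid = (s + e) >> 1;
        const int c = (keys[mid] < k) ? -1 : (keys[mid] > k) ? 1 : 0;
        if (c < 0) {
            s = mid + 1;
        } else if (IsMulti) {
            e = mid;
            if (c == 0) exact = MatchKind::kEq;  // remember, keep searching left
        } else if (c > 0) {
            e = mid;
        } else {
            return {mid, MatchKind::kEq};        // unique container: stop early
        }
    }
    return {s, exact};
}

int main() {
    std::vector<int> keys = {1, 3, 3, 3, 7};
    auto multi = BinarySearchCompareTo<true>(keys, 3);
    assert(multi.value == 1 && multi.match == MatchKind::kEq);  // first 3
    auto uniq = BinarySearchCompareTo<false>(keys, 7);
    assert(uniq.value == 4 && uniq.match == MatchKind::kEq);
}
```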
- void value_destroy_n(const size_type i, const size_type n, - allocator_type *alloc) { - for (size_type j = 0; j < n; ++j) { - value_destroy(i + j, alloc); - } - } - - template - friend class btree; - template - friend struct btree_iterator; - friend class BtreeNodePeer; - }; - - template - struct btree_iterator { - private: - using key_type = typename Node::key_type; - using size_type = typename Node::size_type; - using params_type = typename Node::params_type; - - using node_type = Node; - using normal_node = typename std::remove_const::type; - using const_node = const Node; - using normal_pointer = typename params_type::pointer; - using normal_reference = typename params_type::reference; - using const_pointer = typename params_type::const_pointer; - using const_reference = typename params_type::const_reference; - using slot_type = typename params_type::slot_type; - - using iterator = - btree_iterator; - using const_iterator = - btree_iterator; - - public: - // These aliases are public for std::iterator_traits. - using difference_type = typename Node::difference_type; - using value_type = typename params_type::value_type; - using pointer = Pointer; - using reference = Reference; - using iterator_category = std::bidirectional_iterator_tag; - - btree_iterator() : node(nullptr), position(-1) {} - btree_iterator(Node *n, int p) : node(n), position(p) {} - - // NOTE: this SFINAE allows for implicit conversions from iterator to - // const_iterator, but it specifically avoids defining copy constructors so - // that btree_iterator can be trivially copyable. This is for performance and - // binary size reasons. - template , iterator>::value && - std::is_same::value, - int> = 0> - btree_iterator(const btree_iterator &x) // NOLINT - : node(x.node), position(x.position) {} - - private: - // This SFINAE allows explicit conversions from const_iterator to - // iterator, but also avoids defining a copy constructor. - // NOTE: the const_cast is safe because this constructor is only called by - // non-const methods and the container owns the nodes. - template , const_iterator>::value && - std::is_same::value, - int> = 0> - explicit btree_iterator(const btree_iterator &x) - : node(const_cast(x.node)), position(x.position) {} - - // Increment/decrement the iterator. - void increment() { - if (node->leaf() && ++position < node->count()) { - return; - } - increment_slow(); - } - void increment_slow(); - - void decrement() { - if (node->leaf() && --position >= 0) { - return; - } - decrement_slow(); - } - void decrement_slow(); - - public: - bool operator==(const const_iterator &x) const { - return node == x.node && position == x.position; - } - bool operator!=(const const_iterator &x) const { - return node != x.node || position != x.position; - } - bool operator==(const iterator &x) const { - return node == x.node && position == x.position; - } - bool operator!=(const iterator &x) const { - return node != x.node || position != x.position; - } - - // Accessors for the key/value the iterator is pointing at. 
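The NOTE above on trivially copyable iterators can be checked in miniature: as long as the iterator-to-const_iterator conversion is a constrained template constructor rather than a user-defined copy constructor, the compiler-generated copy operations stay trivial. A reduced sketch, with the SFINAE condition simplified relative to the real one:

```
#include <type_traits>

template <typename Node, typename Reference>
struct Iter {
    Iter() = default;
    Iter(Node *n, int p) : node(n), position(p) {}

    // Implicit conversion from the non-const iterator only; a template
    // constructor never counts as a copy constructor, so copying Iter
    // remains trivial (memcpy-able).
    template <typename N, typename R,
              std::enable_if_t<std::is_const<Node>::value &&
                               !std::is_const<N>::value &&
                               std::is_same<const N, Node>::value, int> = 0>
    Iter(const Iter<N, R> &x) : node(x.node), position(x.position) {}

    Node *node = nullptr;
    int position = -1;
};

struct NodeT { int dummy; };
using iterator = Iter<NodeT, int &>;
using const_iterator = Iter<const NodeT, const int &>;

static_assert(std::is_trivially_copyable<iterator>::value, "");
static_assert(std::is_trivially_copyable<const_iterator>::value, "");
static_assert(std::is_convertible<iterator, const_iterator>::value, "");
static_assert(!std::is_convertible<const_iterator, iterator>::value, "");

int main() {}
```

The real code additionally allows the reverse conversion through a private explicit constructor; this sketch models only the public direction.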
- reference operator*() const { - return node->value(position); - } - pointer operator->() const { - return &node->value(position); - } - - btree_iterator& operator++() { - increment(); - return *this; - } - btree_iterator& operator--() { - decrement(); - return *this; - } - btree_iterator operator++(int) { - btree_iterator tmp = *this; - ++*this; - return tmp; - } - btree_iterator operator--(int) { - btree_iterator tmp = *this; - --*this; - return tmp; - } - - private: - template - friend class btree; - template - friend class btree_container; - template - friend class btree_set_container; - template - friend class btree_map_container; - template - friend class btree_multiset_container; - template - friend struct btree_iterator; - template - friend class base_checker; - - const key_type &key() const { return node->key(position); } - slot_type *slot() { return node->slot(position); } - - // The node in the tree the iterator is pointing at. - Node *node; - // The position within the node of the tree the iterator is pointing at. - // TODO(ezb): make this a field_type - int position; - }; - - template - class btree { - using node_type = btree_node; - using is_key_compare_to = typename Params::is_key_compare_to; - - // We use a static empty node for the root/leftmost/rightmost of empty btrees - // in order to avoid branching in begin()/end(). - struct alignas(node_type::Alignment()) EmptyNodeType : node_type { - using field_type = typename node_type::field_type; - node_type *parent; - field_type position = 0; - field_type start = 0; - field_type count = 0; - // max_count must be != kInternalNodeMaxCount (so that this node is regarded - // as a leaf node). max_count() is never called when the tree is empty. - field_type max_count = node_type::kInternalNodeMaxCount + 1; - -#ifdef _MSC_VER - // MSVC has constexpr code generations bugs here. - EmptyNodeType() : parent(this) {} -#else - constexpr EmptyNodeType(node_type *p) : parent(p) {} -#endif - }; - - static node_type *EmptyNode() { -#ifdef _MSC_VER - static EmptyNodeType empty_node; - // This assert fails on some other construction methods. 
- assert(empty_node.parent == &empty_node); - return &empty_node; -#else - static constexpr EmptyNodeType empty_node( - const_cast(&empty_node)); - return const_cast(&empty_node); -#endif - } - - enum { - kNodeValues = node_type::kNodeValues, - kMinNodeValues = kNodeValues / 2, - }; - - struct node_stats { - using size_type = typename Params::size_type; - - node_stats(size_type l, size_type i) - : leaf_nodes(l), - internal_nodes(i) { - } - - node_stats& operator+=(const node_stats &x) { - leaf_nodes += x.leaf_nodes; - internal_nodes += x.internal_nodes; - return *this; - } - - size_type leaf_nodes; - size_type internal_nodes; - }; - - public: - using key_type = typename Params::key_type; - using value_type = typename Params::value_type; - using size_type = typename Params::size_type; - using difference_type = typename Params::difference_type; - using key_compare = typename Params::key_compare; - using value_compare = typename Params::value_compare; - using allocator_type = typename Params::allocator_type; - using reference = typename Params::reference; - using const_reference = typename Params::const_reference; - using pointer = typename Params::pointer; - using const_pointer = typename Params::const_pointer; - using iterator = btree_iterator; - using const_iterator = typename iterator::const_iterator; - using reverse_iterator = std::reverse_iterator; - using const_reverse_iterator = std::reverse_iterator; - using node_handle_type = node_handle; - - // Internal types made public for use by btree_container types. - using params_type = Params; - using slot_type = typename Params::slot_type; - - private: - // For use in copy_or_move_values_in_order. - const value_type &maybe_move_from_iterator(const_iterator x) { return *x; } - value_type &&maybe_move_from_iterator(iterator x) { return std::move(*x); } - - // Copies or moves (depending on the template parameter) the values in - // x into this btree in their order in x. This btree must be empty before this - // method is called. This method is used in copy construction, copy - // assignment, and move assignment. - template - void copy_or_move_values_in_order(Btree *x); - - // Validates that various assumptions/requirements are true at compile time. - constexpr static bool static_assert_validation(); - - public: - btree(const key_compare &comp, const allocator_type &alloc); - - btree(const btree &x); - btree(btree &&x) noexcept - : root_(std::move(x.root_)), - rightmost_(phmap::exchange(x.rightmost_, EmptyNode())), - size_(phmap::exchange(x.size_, 0)) { - x.mutable_root() = EmptyNode(); - } - - ~btree() { - // Put static_asserts in destructor to avoid triggering them before the type - // is complete. - static_assert(static_assert_validation(), "This call must be elided."); - clear(); - } - - // Assign the contents of x to *this. 
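The static empty node above removes the `empty()` branch from `begin()`/`end()`: an empty tree points its root and rightmost pointers at one shared sentinel leaf whose parent is itself, and since the leftmost node is stored as the root's parent, begin() and end() coincide automatically. A minimal model of the wiring (names are ours):

```
#include <cassert>

struct FakeNode {
    FakeNode *parent;
    int count = 0;
};

FakeNode *EmptyNode() {
    static FakeNode empty{&empty};  // parent wired back to itself
    return &empty;
}

struct FakeTree {
    FakeNode *root = EmptyNode();
    FakeNode *rightmost = EmptyNode();
    // The leftmost node is stored as the root's parent; for the
    // sentinel that is the sentinel itself.
    FakeNode *leftmost() const { return root->parent; }
};

int main() {
    FakeTree t;
    assert(t.leftmost() == t.rightmost);  // begin() == end() on an empty tree
    assert(t.leftmost()->count == 0);     // iterating visits nothing
}
```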
- btree &operator=(const btree &x); - btree &operator=(btree &&x) noexcept; - - iterator begin() { - return iterator(leftmost(), 0); - } - const_iterator begin() const { - return const_iterator(leftmost(), 0); - } - iterator end() { return iterator(rightmost_, rightmost_->count()); } - const_iterator end() const { - return const_iterator(rightmost_, rightmost_->count()); - } - reverse_iterator rbegin() { - return reverse_iterator(end()); - } - const_reverse_iterator rbegin() const { - return const_reverse_iterator(end()); - } - reverse_iterator rend() { - return reverse_iterator(begin()); - } - const_reverse_iterator rend() const { - return const_reverse_iterator(begin()); - } - - // Finds the first element whose key is not less than key. - template - iterator lower_bound(const K &key) { - return internal_end(internal_lower_bound(key)); - } - template - const_iterator lower_bound(const K &key) const { - return internal_end(internal_lower_bound(key)); - } - - // Finds the first element whose key is greater than key. - template - iterator upper_bound(const K &key) { - return internal_end(internal_upper_bound(key)); - } - template - const_iterator upper_bound(const K &key) const { - return internal_end(internal_upper_bound(key)); - } - - // Finds the range of values which compare equal to key. The first member of - // the returned pair is equal to lower_bound(key). The second member pair of - // the pair is equal to upper_bound(key). - template - std::pair equal_range(const K &key) { - return {lower_bound(key), upper_bound(key)}; - } - template - std::pair equal_range(const K &key) const { - return {lower_bound(key), upper_bound(key)}; - } - - // Inserts a value into the btree only if it does not already exist. The - // boolean return value indicates whether insertion succeeded or failed. - // Requirement: if `key` already exists in the btree, does not consume `args`. - // Requirement: `key` is never referenced after consuming `args`. - template - std::pair insert_unique(const key_type &key, Args &&... args); - - // Inserts with hint. Checks to see if the value should be placed immediately - // before `position` in the tree. If so, then the insertion will take - // amortized constant time. If not, the insertion will take amortized - // logarithmic time as if a call to insert_unique() were made. - // Requirement: if `key` already exists in the btree, does not consume `args`. - // Requirement: `key` is never referenced after consuming `args`. - template - std::pair insert_hint_unique(iterator position, - const key_type &key, - Args &&... args); - - // Insert a range of values into the btree. - template - void insert_iterator_unique(InputIterator b, InputIterator e); - - // Inserts a value into the btree. - template - iterator insert_multi(const key_type &key, ValueType &&v); - - // Inserts a value into the btree. - template - iterator insert_multi(ValueType &&v) { - return insert_multi(params_type::key(v), std::forward(v)); - } - - // Insert with hint. Check to see if the value should be placed immediately - // before position in the tree. If it does, then the insertion will take - // amortized constant time. If not, the insertion will take amortized - // logarithmic time as if a call to insert_multi(v) were made. - template - iterator insert_hint_multi(iterator position, ValueType &&v); - - // Insert a range of values into the btree. - template - void insert_iterator_multi(InputIterator b, InputIterator e); - - // Erase the specified iterator from the btree. 
The iterator must be valid - // (i.e. not equal to end()). Return an iterator pointing to the node after - // the one that was erased (or end() if none exists). - // Requirement: does not read the value at `*iter`. - iterator erase(iterator iter); - - // Erases range. Returns the number of keys erased and an iterator pointing - // to the element after the last erased element. - std::pair erase(iterator begin, iterator end); - - // Erases the specified key from the btree. Returns 1 if an element was - // erased and 0 otherwise. - template - size_type erase_unique(const K &key); - - // Erases all of the entries matching the specified key from the - // btree. Returns the number of elements erased. - template - size_type erase_multi(const K &key); - - // Finds the iterator corresponding to a key or returns end() if the key is - // not present. - template - iterator find(const K &key) { - return internal_end(internal_find(key)); - } - template - const_iterator find(const K &key) const { - return internal_end(internal_find(key)); - } - - // Returns a count of the number of times the key appears in the btree. - template - size_type count_unique(const K &key) const { - const iterator beg = internal_find(key); - if (beg.node == nullptr) { - // The key doesn't exist in the tree. - return 0; - } - return 1; - } - // Returns a count of the number of times the key appears in the btree. - template - size_type count_multi(const K &key) const { - const auto range = equal_range(key); - return std::distance(range.first, range.second); - } - - // Clear the btree, deleting all of the values it contains. - void clear(); - - // Swap the contents of *this and x. - void swap(btree &x); - - const key_compare &key_comp() const noexcept { - return root_.template get<0>(); - } - template - bool compare_keys(const K &x, const LK &y) const { - return compare_internal::compare_result_as_less_than(key_comp()(x, y)); - } - - value_compare value_comp() const { return value_compare(key_comp()); } - - // Verifies the structure of the btree. - void verify() const; - - // Size routines. - size_type size() const { return size_; } - size_type max_size() const { return (std::numeric_limits::max)(); } - bool empty() const { return size_ == 0; } - - // The height of the btree. An empty tree will have height 0. - size_type height() const { - size_type h = 0; - if (!empty()) { - // Count the length of the chain from the leftmost node up to the - // root. We actually count from the root back around to the level below - // the root, but the calculation is the same because of the circularity - // of that traversal. - const node_type *n = root(); - do { - ++h; - n = n->parent(); - } while (n != root()); - } - return h; - } - - // The number of internal, leaf and total nodes used by the btree. - size_type leaf_nodes() const { - return internal_stats(root()).leaf_nodes; - } - size_type internal_nodes() const { - return internal_stats(root()).internal_nodes; - } - size_type nodes() const { - node_stats stats = internal_stats(root()); - return stats.leaf_nodes + stats.internal_nodes; - } - - // The total number of bytes used by the btree. 
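The `height()` loop above exploits the same parent-pointer reuse: because the root's parent is the leftmost bottom-level leaf, the parent chain starting at the root is circular, and its length equals the tree height. A toy reconstruction of that circular chain (hand-wired nodes, no real tree is built):

```
#include <cassert>

struct Node { Node *parent = nullptr; };

int Height(const Node *root) {
    int h = 0;
    const Node *n = root;
    do { ++h; n = n->parent; } while (n != root);
    return h;
}

int main() {
    Node root, mid, leaf;
    root.parent = &leaf;  // root's "parent" is the leftmost leaf
    leaf.parent = &mid;
    mid.parent = &root;
    assert(Height(&root) == 3);  // three levels: root, mid, leaf

    Node solo;            // a one-node tree is its own leftmost leaf
    solo.parent = &solo;
    assert(Height(&solo) == 1);
}
```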
- size_type bytes_used() const { - node_stats stats = internal_stats(root()); - if (stats.leaf_nodes == 1 && stats.internal_nodes == 0) { - return sizeof(*this) + - node_type::LeafSize(root()->max_count()); - } else { - return sizeof(*this) + - stats.leaf_nodes * node_type::LeafSize() + - stats.internal_nodes * node_type::InternalSize(); - } - } - - // The average number of bytes used per value stored in the btree. - static double average_bytes_per_value() { - // Returns the number of bytes per value on a leaf node that is 75% - // full. Experimentally, this matches up nicely with the computed number of - // bytes per value in trees that had their values inserted in random order. - return node_type::LeafSize() / (kNodeValues * 0.75); - } - - // The fullness of the btree. Computed as the number of elements in the btree - // divided by the maximum number of elements a tree with the current number - // of nodes could hold. A value of 1 indicates perfect space - // utilization. Smaller values indicate space wastage. - // Returns 0 for empty trees. - double fullness() const { - if (empty()) return 0.0; - return static_cast(size()) / (nodes() * kNodeValues); - } - // The overhead of the btree structure in bytes per node. Computed as the - // total number of bytes used by the btree minus the number of bytes used for - // storing elements divided by the number of elements. - // Returns 0 for empty trees. - double overhead() const { - if (empty()) return 0.0; - return (bytes_used() - size() * sizeof(value_type)) / - static_cast(size()); - } - - // The allocator used by the btree. - allocator_type get_allocator() const { - return allocator(); - } - - private: - // Internal accessor routines. - node_type *root() { return root_.template get<2>(); } - const node_type *root() const { return root_.template get<2>(); } - node_type *&mutable_root() noexcept { return root_.template get<2>(); } - key_compare *mutable_key_comp() noexcept { return &root_.template get<0>(); } - - // The leftmost node is stored as the parent of the root node. - node_type *leftmost() { return root()->parent(); } - const node_type *leftmost() const { return root()->parent(); } - - // Allocator routines. - allocator_type *mutable_allocator() noexcept { - return &root_.template get<1>(); - } - const allocator_type &allocator() const noexcept { - return root_.template get<1>(); - } - - // Allocates a correctly aligned node of at least size bytes using the - // allocator. - node_type *allocate(const size_type sz) { - return reinterpret_cast( - phmap::priv::Allocate( - mutable_allocator(), (size_t)sz)); - } - - // Node creation/deletion routines. - node_type* new_internal_node(node_type *parent) { - node_type *p = allocate(node_type::InternalSize()); - return node_type::init_internal(p, parent); - } - node_type* new_leaf_node(node_type *parent) { - node_type *p = allocate(node_type::LeafSize()); - return node_type::init_leaf(p, parent, kNodeValues); - } - node_type *new_leaf_root_node(const int max_count) { - node_type *p = allocate(node_type::LeafSize(max_count)); - return node_type::init_leaf(p, p, max_count); - } - - // Deletion helper routines. - void erase_same_node(iterator begin, iterator end); - iterator erase_from_leaf_node(iterator begin, size_type to_erase); - iterator rebalance_after_delete(iterator iter); - - // Deallocates a node of a certain size in bytes using the allocator. 
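The utilization metrics above reduce to simple arithmetic; here is a worked example with invented numbers (node footprints and counts are made up solely to exercise the `fullness()` and `overhead()` formulas):

```
#include <cassert>
#include <cstdio>

// Invented scenario: 61 slots per node, 8-byte values, 10000 values
// spread over 230 leaf nodes and 4 internal nodes with model
// footprints of 560 and 1064 bytes respectively.
int main() {
    const double kNodeValues = 61, value_size = 8;
    const double size = 10000, leaf_nodes = 230, internal_nodes = 4;
    const double leaf_size = 560, internal_size = 1064;

    const double nodes = leaf_nodes + internal_nodes;
    const double bytes_used = leaf_nodes * leaf_size + internal_nodes * internal_size;

    // fullness: values stored vs. values the current nodes could hold.
    const double fullness = size / (nodes * kNodeValues);
    // overhead: non-payload bytes amortized over each stored value.
    const double overhead = (bytes_used - size * value_size) / size;

    std::printf("fullness = %.2f, overhead = %.2f bytes/value\n", fullness, overhead);
    assert(fullness > 0.0 && fullness <= 1.0);
}
```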
- void deallocate(const size_type sz, node_type *node) { - phmap::priv::Deallocate( - mutable_allocator(), node, (size_t)sz); - } - - void delete_internal_node(node_type *node) { - node->destroy(mutable_allocator()); - deallocate(node_type::InternalSize(), node); - } - void delete_leaf_node(node_type *node) { - node->destroy(mutable_allocator()); - deallocate(node_type::LeafSize(node->max_count()), node); - } - - // Rebalances or splits the node iter points to. - void rebalance_or_split(iterator *iter); - - // Merges the values of left, right and the delimiting key on their parent - // onto left, removing the delimiting key and deleting right. - void merge_nodes(node_type *left, node_type *right); - - // Tries to merge node with its left or right sibling, and failing that, - // rebalance with its left or right sibling. Returns true if a merge - // occurred, at which point it is no longer valid to access node. Returns - // false if no merging took place. - bool try_merge_or_rebalance(iterator *iter); - - // Tries to shrink the height of the tree by 1. - void try_shrink(); - - iterator internal_end(iterator iter) { - return iter.node != nullptr ? iter : end(); - } - const_iterator internal_end(const_iterator iter) const { - return iter.node != nullptr ? iter : end(); - } - - // Emplaces a value into the btree immediately before iter. Requires that - // key(v) <= iter.key() and (--iter).key() <= key(v). - template - iterator internal_emplace(iterator iter, Args &&... args); - - // Returns an iterator pointing to the first value >= the value "iter" is - // pointing at. Note that "iter" might be pointing to an invalid location as - // iter.position == iter.node->count(). This routine simply moves iter up in - // the tree to a valid location. - // Requires: iter.node is non-null. - template - static IterType internal_last(IterType iter); - - // Returns an iterator pointing to the leaf position at which key would - // reside in the tree. We provide 2 versions of internal_locate. The first - // version uses a less-than comparator and is incapable of distinguishing when - // there is an exact match. The second version is for the key-compare-to - // specialization and distinguishes exact matches. The key-compare-to - // specialization allows the caller to avoid a subsequent comparison to - // determine if an exact match was made, which is important for keys with - // expensive comparison, such as strings. - template - SearchResult internal_locate( - const K &key) const; - - template - SearchResult internal_locate_impl( - const K &key, std::false_type /* IsCompareTo */) const; - - template - SearchResult internal_locate_impl( - const K &key, std::true_type /* IsCompareTo */) const; - - // Internal routine which implements lower_bound(). - template - iterator internal_lower_bound(const K &key) const; - - // Internal routine which implements upper_bound(). - template - iterator internal_upper_bound(const K &key) const; - - // Internal routine which implements find(). - template - iterator internal_find(const K &key) const; - - // Deletes a node and all of its children. - void internal_clear(node_type *node); - - // Verifies the tree structure of node. - int internal_verify(const node_type *node, - const key_type *lo, const key_type *hi) const; - - node_stats internal_stats(const node_type *node) const { - // The root can be a static empty node. 
- if (node == nullptr || (node == root() && empty())) { - return node_stats(0, 0); - } - if (node->leaf()) { - return node_stats(1, 0); - } - node_stats res(0, 1); - for (int i = 0; i <= node->count(); ++i) { - res += internal_stats(node->child(i)); - } - return res; - } - - public: - // Exposed only for tests. - static bool testonly_uses_linear_node_search() { - return node_type::testonly_uses_linear_node_search(); - } - - private: - // We use compressed tuple in order to save space because key_compare and - // allocator_type are usually empty. - phmap::priv::CompressedTuple - root_; - - // A pointer to the rightmost node. Note that the leftmost node is stored as - // the root's parent. - node_type *rightmost_; - - // Number of values. - size_type size_; - }; - - //// - // btree_node methods - template - template - inline void btree_node
<P>
::emplace_value(const size_type i, - allocator_type *alloc, - Args &&... args) { - assert(i <= count()); - // Shift old values to create space for new value and then construct it in - // place. - if (i < count()) { - value_init(count(), alloc, slot(count() - 1)); - for (size_type j = count() - 1; j > i; --j) - params_type::move(alloc, slot(j - 1), slot(j)); - value_destroy(i, alloc); - } - value_init(i, alloc, std::forward(args)...); - set_count((field_type)(count() + 1)); - - if (!leaf() && count() > i + 1) { - for (int j = count(); j > (int)(i + 1); --j) { - set_child(j, child(j - 1)); - } - clear_child(i + 1); - } - } - - template - inline void btree_node
<P>
::remove_value(const int i, allocator_type *alloc) { - if (!leaf() && count() > i + 1) { - assert(child(i + 1)->count() == 0); - for (size_type j = i + 1; j < count(); ++j) { - set_child(j, child(j + 1)); - } - clear_child(count()); - } - - remove_values_ignore_children(i, /*to_erase=*/1, alloc); - } - - template - inline void btree_node
<P>
::remove_values_ignore_children( - int i, size_type to_erase, allocator_type *alloc) { - params_type::move(alloc, slot(i + to_erase), slot(count()), slot(i)); - value_destroy_n(count() - to_erase, to_erase, alloc); - set_count((field_type)(count() - to_erase)); - } - - template - void btree_node
<P>
::rebalance_right_to_left(const int to_move, - btree_node *right, - allocator_type *alloc) { - assert(parent() == right->parent()); - assert(position() + 1 == right->position()); - assert(right->count() >= count()); - assert(to_move >= 1); - assert(to_move <= right->count()); - - // 1) Move the delimiting value in the parent to the left node. - value_init(count(), alloc, parent()->slot(position())); - - // 2) Move the (to_move - 1) values from the right node to the left node. - right->uninitialized_move_n(to_move - 1, 0, count() + 1, this, alloc); - - // 3) Move the new delimiting value to the parent from the right node. - params_type::move(alloc, right->slot(to_move - 1), - parent()->slot(position())); - - // 4) Shift the values in the right node to their correct position. - params_type::move(alloc, right->slot(to_move), right->slot(right->count()), - right->slot(0)); - - // 5) Destroy the now-empty to_move entries in the right node. - right->value_destroy_n(right->count() - to_move, to_move, alloc); - - if (!leaf()) { - // Move the child pointers from the right to the left node. - for (int i = 0; i < to_move; ++i) { - init_child(count() + i + 1, right->child(i)); - } - for (int i = 0; i <= right->count() - to_move; ++i) { - assert(i + to_move <= right->max_count()); - right->init_child(i, right->child(i + to_move)); - right->clear_child(i + to_move); - } - } - - // Fixup the counts on the left and right nodes. - set_count((field_type)(count() + to_move)); - right->set_count((field_type)(right->count() - to_move)); - } - - template - void btree_node
<P>
::rebalance_left_to_right(const int to_move, - btree_node *right, - allocator_type *alloc) { - assert(parent() == right->parent()); - assert(position() + 1 == right->position()); - assert(count() >= right->count()); - assert(to_move >= 1); - assert(to_move <= count()); - - // Values in the right node are shifted to the right to make room for the - // new to_move values. Then, the delimiting value in the parent and the - // other (to_move - 1) values in the left node are moved into the right node. - // Lastly, a new delimiting value is moved from the left node into the - // parent, and the remaining empty left node entries are destroyed. - - if (right->count() >= to_move) { - // The original location of the right->count() values are sufficient to hold - // the new to_move entries from the parent and left node. - - // 1) Shift existing values in the right node to their correct positions. - right->uninitialized_move_n(to_move, right->count() - to_move, - right->count(), right, alloc); - if (right->count() > to_move) { - for (slot_type *src = right->slot(right->count() - to_move - 1), - *dest = right->slot(right->count() - 1), - *end = right->slot(0); - src >= end; --src, --dest) { - params_type::move(alloc, src, dest); - } - } - - // 2) Move the delimiting value in the parent to the right node. - params_type::move(alloc, parent()->slot(position()), - right->slot(to_move - 1)); - - // 3) Move the (to_move - 1) values from the left node to the right node. - params_type::move(alloc, slot(count() - (to_move - 1)), slot(count()), - right->slot(0)); - } else { - // The right node does not have enough initialized space to hold the new - // to_move entries, so part of them will move to uninitialized space. - - // 1) Shift existing values in the right node to their correct positions. - right->uninitialized_move_n(right->count(), 0, to_move, right, alloc); - - // 2) Move the delimiting value in the parent to the right node. - right->value_init(to_move - 1, alloc, parent()->slot(position())); - - // 3) Move the (to_move - 1) values from the left node to the right node. - const size_type uninitialized_remaining = to_move - right->count() - 1; - uninitialized_move_n(uninitialized_remaining, - count() - uninitialized_remaining, right->count(), - right, alloc); - params_type::move(alloc, slot(count() - (to_move - 1)), - slot(count() - uninitialized_remaining), right->slot(0)); - } - - // 4) Move the new delimiting value to the parent from the left node. - params_type::move(alloc, slot(count() - to_move), parent()->slot(position())); - - // 5) Destroy the now-empty to_move entries in the left node. - value_destroy_n(count() - to_move, to_move, alloc); - - if (!leaf()) { - // Move the child pointers from the left to the right node. - for (int i = right->count(); i >= 0; --i) { - right->init_child(i + to_move, right->child(i)); - right->clear_child(i); - } - for (int i = 1; i <= to_move; ++i) { - right->init_child(i - 1, child(count() - to_move + i)); - clear_child(count() - to_move + i); - } - } - - // Fixup the counts on the left and right nodes. - set_count((field_type)(count() - to_move)); - right->set_count((field_type)(right->count() + to_move)); - } - - template - void btree_node
<P>
::split(const int insert_position, btree_node *dest, - allocator_type *alloc) { - assert(dest->count() == 0); - assert(max_count() == kNodeValues); - - // We bias the split based on the position being inserted. If we're - // inserting at the beginning of the left node then bias the split to put - // more values on the right node. If we're inserting at the end of the - // right node then bias the split to put more values on the left node. - if (insert_position == 0) { - dest->set_count((field_type)(count() - 1)); - } else if (insert_position == kNodeValues) { - dest->set_count(0); - } else { - dest->set_count((field_type)(count() / 2)); - } - set_count((field_type)(count() - dest->count())); - assert(count() >= 1); - - // Move values from the left sibling to the right sibling. - uninitialized_move_n(dest->count(), count(), 0, dest, alloc); - - // Destroy the now-empty entries in the left node. - value_destroy_n(count(), dest->count(), alloc); - - // The split key is the largest value in the left sibling. - set_count((field_type)(count() - 1)); - parent()->emplace_value(position(), alloc, slot(count())); - value_destroy(count(), alloc); - parent()->init_child(position() + 1, dest); - - if (!leaf()) { - for (int i = 0; i <= dest->count(); ++i) { - assert(child(count() + i + 1) != nullptr); - dest->init_child(i, child(count() + i + 1)); - clear_child(count() + i + 1); - } - } - } - - template - void btree_node
<P>
::merge(btree_node *src, allocator_type *alloc) { - assert(parent() == src->parent()); - assert(position() + 1 == src->position()); - - // Move the delimiting value to the left node. - value_init(count(), alloc, parent()->slot(position())); - - // Move the values from the right to the left node. - src->uninitialized_move_n(src->count(), 0, count() + 1, this, alloc); - - // Destroy the now-empty entries in the right node. - src->value_destroy_n(0, src->count(), alloc); - - if (!leaf()) { - // Move the child pointers from the right to the left node. - for (int i = 0; i <= src->count(); ++i) { - init_child(count() + i + 1, src->child(i)); - src->clear_child(i); - } - } - - // Fixup the counts on the src and dest nodes. - set_count((field_type)(1 + count() + src->count())); - src->set_count(0); - - // Remove the value on the parent node. - parent()->remove_value(position(), alloc); - } - - template - void btree_node
<P>
::swap(btree_node *x, allocator_type *alloc) { - using std::swap; - assert(leaf() == x->leaf()); - - // Determine which is the smaller/larger node. - btree_node *smaller = this, *larger = x; - if (smaller->count() > larger->count()) { - swap(smaller, larger); - } - - // Swap the values. - for (slot_type *a = smaller->slot(0), *b = larger->slot(0), - *end = a + smaller->count(); - a != end; ++a, ++b) { - params_type::swap(alloc, a, b); - } - - // Move values that can't be swapped. - const size_type to_move = larger->count() - smaller->count(); - larger->uninitialized_move_n(to_move, smaller->count(), smaller->count(), - smaller, alloc); - larger->value_destroy_n(smaller->count(), to_move, alloc); - - if (!leaf()) { - // Swap the child pointers. - std::swap_ranges(&smaller->mutable_child(0), - &smaller->mutable_child(smaller->count() + 1), - &larger->mutable_child(0)); - // Update swapped children's parent pointers. - int i = 0; - for (; i <= smaller->count(); ++i) { - smaller->child(i)->set_parent(smaller); - larger->child(i)->set_parent(larger); - } - // Move the child pointers that couldn't be swapped. - for (; i <= larger->count(); ++i) { - smaller->init_child(i, larger->child(i)); - larger->clear_child(i); - } - } - - // Swap the counts. - swap(mutable_count(), x->mutable_count()); - } - - //// - // btree_iterator methods - template - void btree_iterator::increment_slow() { - if (node->leaf()) { - assert(position >= node->count()); - btree_iterator save(*this); - while (position == node->count() && !node->is_root()) { - assert(node->parent()->child(node->position()) == node); - position = node->position(); - node = node->parent(); - } - if (position == node->count()) { - *this = save; - } - } else { - assert(position < node->count()); - node = node->child(position + 1); - while (!node->leaf()) { - node = node->child(0); - } - position = 0; - } - } - - template - void btree_iterator::decrement_slow() { - if (node->leaf()) { - assert(position <= -1); - btree_iterator save(*this); - while (position < 0 && !node->is_root()) { - assert(node->parent()->child(node->position()) == node); - position = node->position() - 1; - node = node->parent(); - } - if (position < 0) { - *this = save; - } - } else { - assert(position >= 0); - node = node->child(position); - while (!node->leaf()) { - node = node->child(node->count()); - } - position = node->count() - 1; - } - } - - //// - // btree methods - template - template - void btree
<P>
::copy_or_move_values_in_order(Btree *x) { - static_assert(std::is_same::value || - std::is_same::value, - "Btree type must be same or const."); - assert(empty()); - - // We can avoid key comparisons because we know the order of the - // values is the same order we'll store them in. - auto iter = x->begin(); - if (iter == x->end()) return; - insert_multi(maybe_move_from_iterator(iter)); - ++iter; - for (; iter != x->end(); ++iter) { - // If the btree is not empty, we can just insert the new value at the end - // of the tree. - internal_emplace(end(), maybe_move_from_iterator(iter)); - } - } - - template - constexpr bool btree
<P>
::static_assert_validation() { - static_assert(std::is_nothrow_copy_constructible::value, - "Key comparison must be nothrow copy constructible"); - static_assert(std::is_nothrow_copy_constructible::value, - "Allocator must be nothrow copy constructible"); - static_assert(type_traits_internal::is_trivially_copyable::value, - "iterator not trivially copyable."); - - // Note: We assert that kTargetValues, which is computed from - // Params::kTargetNodeSize, must fit the node_type::field_type. - static_assert( - kNodeValues < (1 << (8 * sizeof(typename node_type::field_type))), - "target node size too large"); - - // Verify that key_compare returns an phmap::{weak,strong}_ordering or bool. - using compare_result_type = - phmap::invoke_result_t; - static_assert( - std::is_same::value || - std::is_convertible::value, - "key comparison function must return phmap::{weak,strong}_ordering or " - "bool."); - - // Test the assumption made in setting kNodeSlotSpace. - static_assert(node_type::MinimumOverhead() >= sizeof(void *) + 4, - "node space assumption incorrect"); - - return true; - } - - template - btree
<P>
::btree(const key_compare &comp, const allocator_type &alloc) - : root_(comp, alloc, EmptyNode()), rightmost_(EmptyNode()), size_(0) {} - - template - btree
<P>
::btree(const btree &x) : btree(x.key_comp(), x.allocator()) { - copy_or_move_values_in_order(&x); - } - - template - template - auto btree
<P>
::insert_unique(const key_type &key, Args &&... args) - -> std::pair { - if (empty()) { - mutable_root() = rightmost_ = new_leaf_root_node(1); - } - - auto res = internal_locate(key); - iterator &iter = res.value; - - if (res.HasMatch()) { - if (res.IsEq()) { - // The key already exists in the tree, do nothing. - return {iter, false}; - } - } else { - iterator last = internal_last(iter); - if (last.node && !compare_keys(key, last.key())) { - // The key already exists in the tree, do nothing. - return {last, false}; - } - } - return {internal_emplace(iter, std::forward(args)...), true}; - } - - template - template - inline auto btree
<P>
::insert_hint_unique(iterator position, const key_type &key, - Args &&... args) - -> std::pair { - if (!empty()) { - if (position == end() || compare_keys(key, position.key())) { - iterator prev = position; - if (position == begin() || compare_keys((--prev).key(), key)) { - // prev.key() < key < position.key() - return {internal_emplace(position, std::forward(args)...), true}; - } - } else if (compare_keys(position.key(), key)) { - ++position; - if (position == end() || compare_keys(key, position.key())) { - // {original `position`}.key() < key < {current `position`}.key() - return {internal_emplace(position, std::forward(args)...), true}; - } - } else { - // position.key() == key - return {position, false}; - } - } - return insert_unique(key, std::forward(args)...); - } - - template - template - void btree
<P>
::insert_iterator_unique(InputIterator b, InputIterator e) { - for (; b != e; ++b) { - insert_hint_unique(end(), params_type::key(*b), *b); - } - } - - template - template - auto btree
<P>
::insert_multi(const key_type &key, ValueType &&v) -> iterator { - if (empty()) { - mutable_root() = rightmost_ = new_leaf_root_node(1); - } - - iterator iter = internal_upper_bound(key); - if (iter.node == nullptr) { - iter = end(); - } - return internal_emplace(iter, std::forward(v)); - } - - template - template - auto btree
<P>
::insert_hint_multi(iterator position, ValueType &&v) -> iterator { - if (!empty()) { - const key_type &key = params_type::key(v); - if (position == end() || !compare_keys(position.key(), key)) { - iterator prev = position; - if (position == begin() || !compare_keys(key, (--prev).key())) { - // prev.key() <= key <= position.key() - return internal_emplace(position, std::forward(v)); - } - } else { - iterator next = position; - ++next; - if (next == end() || !compare_keys(next.key(), key)) { - // position.key() < key <= next.key() - return internal_emplace(next, std::forward(v)); - } - } - } - return insert_multi(std::forward(v)); - } - - template - template - void btree
<P>
::insert_iterator_multi(InputIterator b, InputIterator e) { - for (; b != e; ++b) { - insert_hint_multi(end(), *b); - } - } - - template - auto btree
<P>
::operator=(const btree &x) -> btree & { - if (this != &x) { - clear(); - - *mutable_key_comp() = x.key_comp(); - if (phmap::allocator_traits< - allocator_type>::propagate_on_container_copy_assignment::value) { - *mutable_allocator() = x.allocator(); - } - - copy_or_move_values_in_order(&x); - } - return *this; - } - - template - auto btree
<P>
::operator=(btree &&x) noexcept -> btree & { - if (this != &x) { - clear(); - - using std::swap; - if (phmap::allocator_traits< - allocator_type>::propagate_on_container_copy_assignment::value) { - // Note: `root_` also contains the allocator and the key comparator. - swap(root_, x.root_); - swap(rightmost_, x.rightmost_); - swap(size_, x.size_); - } else { - if (allocator() == x.allocator()) { - swap(mutable_root(), x.mutable_root()); - swap(*mutable_key_comp(), *x.mutable_key_comp()); - swap(rightmost_, x.rightmost_); - swap(size_, x.size_); - } else { - // We aren't allowed to propagate the allocator and the allocator is - // different so we can't take over its memory. We must move each element - // individually. We need both `x` and `this` to have `x`s key comparator - // while moving the values so we can't swap the key comparators. - *mutable_key_comp() = x.key_comp(); - copy_or_move_values_in_order(&x); - } - } - } - return *this; - } - - template - auto btree
<P>
::erase(iterator iter) -> iterator { - bool internal_delete = false; - if (!iter.node->leaf()) { - // Deletion of a value on an internal node. First, move the largest value - // from our left child here, then delete that position (in remove_value() - // below). We can get to the largest value from our left child by - // decrementing iter. - iterator internal_iter(iter); - --iter; - assert(iter.node->leaf()); - params_type::move(mutable_allocator(), iter.node->slot(iter.position), - internal_iter.node->slot(internal_iter.position)); - internal_delete = true; - } - - // Delete the key from the leaf. - iter.node->remove_value(iter.position, mutable_allocator()); - --size_; - - // We want to return the next value after the one we just erased. If we - // erased from an internal node (internal_delete == true), then the next - // value is ++(++iter). If we erased from a leaf node (internal_delete == - // false) then the next value is ++iter. Note that ++iter may point to an - // internal node and the value in the internal node may move to a leaf node - // (iter.node) when rebalancing is performed at the leaf level. - - iterator res = rebalance_after_delete(iter); - - // If we erased from an internal node, advance the iterator. - if (internal_delete) { - ++res; - } - return res; - } - - template - auto btree
<P>
::rebalance_after_delete(iterator iter) -> iterator { - // Merge/rebalance as we walk back up the tree. - iterator res(iter); - bool first_iteration = true; - for (;;) { - if (iter.node == root()) { - try_shrink(); - if (empty()) { - return end(); - } - break; - } - if (iter.node->count() >= kMinNodeValues) { - break; - } - bool merged = try_merge_or_rebalance(&iter); - // On the first iteration, we should update `res` with `iter` because `res` - // may have been invalidated. - if (first_iteration) { - res = iter; - first_iteration = false; - } - if (!merged) { - break; - } - iter.position = iter.node->position(); - iter.node = iter.node->parent(); - } - - // Adjust our return value. If we're pointing at the end of a node, advance - // the iterator. - if (res.position == res.node->count()) { - res.position = res.node->count() - 1; - ++res; - } - - return res; - } - - template - auto btree
<P>
::erase(iterator _begin, iterator _end) - -> std::pair { - difference_type count = std::distance(_begin, _end); - assert(count >= 0); - - if (count == 0) { - return {0, _begin}; - } - - if (count == (difference_type)size_) { - clear(); - return {count, this->end()}; - } - - if (_begin.node == _end.node) { - erase_same_node(_begin, _end); - size_ -= count; - return {count, rebalance_after_delete(_begin)}; - } - - const size_type target_size = size_ - count; - while (size_ > target_size) { - if (_begin.node->leaf()) { - const size_type remaining_to_erase = size_ - target_size; - const size_type remaining_in_node = _begin.node->count() - _begin.position; - _begin = erase_from_leaf_node( - _begin, (std::min)(remaining_to_erase, remaining_in_node)); - } else { - _begin = erase(_begin); - } - } - return {count, _begin}; - } - - template - void btree
<P>
::erase_same_node(iterator _begin, iterator _end) { - assert(_begin.node == _end.node); - assert(_end.position > _begin.position); - - node_type *node = _begin.node; - size_type to_erase = _end.position - _begin.position; - if (!node->leaf()) { - // Delete all children between _begin and _end. - for (size_type i = 0; i < to_erase; ++i) { - internal_clear(node->child(_begin.position + i + 1)); - } - // Rotate children after _end into new positions. - for (size_type i = _begin.position + to_erase + 1; i <= node->count(); ++i) { - node->set_child(i - to_erase, node->child(i)); - node->clear_child(i); - } - } - node->remove_values_ignore_children(_begin.position, to_erase, - mutable_allocator()); - - // Do not need to update rightmost_, because - // * either _end == this->end(), and therefore node == rightmost_, and still - // exists - // * or _end != this->end(), and therefore rightmost_ hasn't been erased, since - // it wasn't covered in [_begin, _end) - } - - template - auto btree
<P>
::erase_from_leaf_node(iterator _begin, size_type to_erase) - -> iterator { - node_type *node = _begin.node; - assert(node->leaf()); - assert(node->count() > _begin.position); - assert(_begin.position + to_erase <= node->count()); - - node->remove_values_ignore_children(_begin.position, to_erase, - mutable_allocator()); - - size_ -= to_erase; - - return rebalance_after_delete(_begin); - } - - template - template - auto btree
<P>
::erase_unique(const K &key) -> size_type { - const iterator iter = internal_find(key); - if (iter.node == nullptr) { - // The key doesn't exist in the tree, return nothing done. - return 0; - } - erase(iter); - return 1; - } - - template - template - auto btree
<P>
::erase_multi(const K &key) -> size_type { - const iterator _begin = internal_lower_bound(key); - if (_begin.node == nullptr) { - // The key doesn't exist in the tree, return nothing done. - return 0; - } - // Delete all of the keys between _begin and upper_bound(key). - const iterator _end = internal_end(internal_upper_bound(key)); - return erase(_begin, _end).first; - } - - template - void btree
<P>
::clear() { - if (!empty()) { - internal_clear(root()); - } - mutable_root() = EmptyNode(); - rightmost_ = EmptyNode(); - size_ = 0; - } - - template - void btree
<P>
::swap(btree &x) { - using std::swap; - if (phmap::allocator_traits< - allocator_type>::propagate_on_container_swap::value) { - // Note: `root_` also contains the allocator and the key comparator. - swap(root_, x.root_); - } else { - // It's undefined behavior if the allocators are unequal here. - assert(allocator() == x.allocator()); - swap(mutable_root(), x.mutable_root()); - swap(*mutable_key_comp(), *x.mutable_key_comp()); - } - swap(rightmost_, x.rightmost_); - swap(size_, x.size_); - } - - template - void btree
<P>
::verify() const { - assert(root() != nullptr); - assert(leftmost() != nullptr); - assert(rightmost_ != nullptr); - assert(empty() || size() == internal_verify(root(), nullptr, nullptr)); - assert(leftmost() == (++const_iterator(root(), -1)).node); - assert(rightmost_ == (--const_iterator(root(), root()->count())).node); - assert(leftmost()->leaf()); - assert(rightmost_->leaf()); - } - - template - void btree
<P>
::rebalance_or_split(iterator *iter) { - node_type *&node = iter->node; - int &insert_position = iter->position; - assert(node->count() == node->max_count()); - assert(kNodeValues == node->max_count()); - - // First try to make room on the node by rebalancing. - node_type *parent = node->parent(); - if (node != root()) { - if (node->position() > 0) { - // Try rebalancing with our left sibling. - node_type *left = parent->child(node->position() - 1); - assert(left->max_count() == kNodeValues); - if (left->count() < kNodeValues) { - // We bias rebalancing based on the position being inserted. If we're - // inserting at the end of the right node then we bias rebalancing to - // fill up the left node. - int to_move = (kNodeValues - left->count()) / - (1 + (insert_position < kNodeValues)); - to_move = (std::max)(1, to_move); - - if (((insert_position - to_move) >= 0) || - ((left->count() + to_move) < kNodeValues)) { - left->rebalance_right_to_left(to_move, node, mutable_allocator()); - - assert(node->max_count() - node->count() == to_move); - insert_position = insert_position - to_move; - if (insert_position < 0) { - insert_position = insert_position + left->count() + 1; - node = left; - } - - assert(node->count() < node->max_count()); - return; - } - } - } - - if (node->position() < parent->count()) { - // Try rebalancing with our right sibling. - node_type *right = parent->child(node->position() + 1); - assert(right->max_count() == kNodeValues); - if (right->count() < kNodeValues) { - // We bias rebalancing based on the position being inserted. If we're - // inserting at the _beginning of the left node then we bias rebalancing - // to fill up the right node. - int to_move = - (kNodeValues - right->count()) / (1 + (insert_position > 0)); - to_move = (std::max)(1, to_move); - - if ((insert_position <= (node->count() - to_move)) || - ((right->count() + to_move) < kNodeValues)) { - node->rebalance_left_to_right(to_move, right, mutable_allocator()); - - if (insert_position > node->count()) { - insert_position = insert_position - node->count() - 1; - node = right; - } - - assert(node->count() < node->max_count()); - return; - } - } - } - - // Rebalancing failed, make sure there is room on the parent node for a new - // value. - assert(parent->max_count() == kNodeValues); - if (parent->count() == kNodeValues) { - iterator parent_iter(node->parent(), node->position()); - rebalance_or_split(&parent_iter); - } - } else { - // Rebalancing not possible because this is the root node. - // Create a new root node and set the current root node as the child of the - // new root. - parent = new_internal_node(parent); - parent->init_child(0, root()); - mutable_root() = parent; - // If the former root was a leaf node, then it's now the rightmost node. - assert(!parent->child(0)->leaf() || parent->child(0) == rightmost_); - } - - // Split the node. - node_type *split_node; - if (node->leaf()) { - split_node = new_leaf_node(parent); - node->split(insert_position, split_node, mutable_allocator()); - if (rightmost_ == node) rightmost_ = split_node; - } else { - split_node = new_internal_node(parent); - node->split(insert_position, split_node, mutable_allocator()); - } - - if (insert_position > node->count()) { - insert_position = insert_position - node->count() - 1; - node = split_node; - } - } - - template - void btree
<P>
::merge_nodes(node_type *left, node_type *right) { - left->merge(right, mutable_allocator()); - if (right->leaf()) { - if (rightmost_ == right) rightmost_ = left; - delete_leaf_node(right); - } else { - delete_internal_node(right); - } - } - - template - bool btree
<P>
::try_merge_or_rebalance(iterator *iter) { - node_type *parent = iter->node->parent(); - if (iter->node->position() > 0) { - // Try merging with our left sibling. - node_type *left = parent->child(iter->node->position() - 1); - assert(left->max_count() == kNodeValues); - if ((1 + left->count() + iter->node->count()) <= kNodeValues) { - iter->position += 1 + left->count(); - merge_nodes(left, iter->node); - iter->node = left; - return true; - } - } - if (iter->node->position() < parent->count()) { - // Try merging with our right sibling. - node_type *right = parent->child(iter->node->position() + 1); - assert(right->max_count() == kNodeValues); - if ((1 + iter->node->count() + right->count()) <= kNodeValues) { - merge_nodes(iter->node, right); - return true; - } - // Try rebalancing with our right sibling. We don't perform rebalancing if - // we deleted the first element from iter->node and the node is not - // empty. This is a small optimization for the common pattern of deleting - // from the front of the tree. - if ((right->count() > kMinNodeValues) && - ((iter->node->count() == 0) || - (iter->position > 0))) { - int to_move = (right->count() - iter->node->count()) / 2; - to_move = (std::min)(to_move, right->count() - 1); - iter->node->rebalance_right_to_left(to_move, right, mutable_allocator()); - return false; - } - } - if (iter->node->position() > 0) { - // Try rebalancing with our left sibling. We don't perform rebalancing if - // we deleted the last element from iter->node and the node is not - // empty. This is a small optimization for the common pattern of deleting - // from the back of the tree. - node_type *left = parent->child(iter->node->position() - 1); - if ((left->count() > kMinNodeValues) && - ((iter->node->count() == 0) || - (iter->position < iter->node->count()))) { - int to_move = (left->count() - iter->node->count()) / 2; - to_move = (std::min)(to_move, left->count() - 1); - left->rebalance_left_to_right(to_move, iter->node, mutable_allocator()); - iter->position += to_move; - return false; - } - } - return false; - } - - template - void btree
<P>
::try_shrink() { - if (root()->count() > 0) { - return; - } - // Deleted the last item on the root node, shrink the height of the tree. - if (root()->leaf()) { - assert(size() == 0); - delete_leaf_node(root()); - mutable_root() = EmptyNode(); - rightmost_ = EmptyNode(); - } else { - node_type *child = root()->child(0); - child->make_root(); - delete_internal_node(root()); - mutable_root() = child; - } - } - - template - template - inline IterType btree
<P>
::internal_last(IterType iter) { - assert(iter.node != nullptr); - while (iter.position == iter.node->count()) { - iter.position = iter.node->position(); - iter.node = iter.node->parent(); - if (iter.node->leaf()) { - iter.node = nullptr; - break; - } - } - return iter; - } - - template - template - inline auto btree
<P>
::internal_emplace(iterator iter, Args &&... args) - -> iterator { - if (!iter.node->leaf()) { - // We can't insert on an internal node. Instead, we'll insert after the - // previous value which is guaranteed to be on a leaf node. - --iter; - ++iter.position; - } - const int max_count = iter.node->max_count(); - if (iter.node->count() == max_count) { - // Make room in the leaf for the new item. - if (max_count < kNodeValues) { - // Insertion into the root where the root is smaller than the full node - // size. Simply grow the size of the root node. - assert(iter.node == root()); - iter.node = - new_leaf_root_node((std::min)(kNodeValues, 2 * max_count)); - iter.node->swap(root(), mutable_allocator()); - delete_leaf_node(root()); - mutable_root() = iter.node; - rightmost_ = iter.node; - } else { - rebalance_or_split(&iter); - } - } - iter.node->emplace_value(iter.position, mutable_allocator(), - std::forward(args)...); - ++size_; - return iter; - } - - template - template - inline auto btree
<P>
::internal_locate(const K &key) const - -> SearchResult { - return internal_locate_impl(key, is_key_compare_to()); - } - - template - template - inline auto btree
<P>
::internal_locate_impl( - const K &key, std::false_type /* IsCompareTo */) const - -> SearchResult { - iterator iter(const_cast(root()), 0); - for (;;) { - iter.position = iter.node->lower_bound(key, key_comp()).value; - // NOTE: we don't need to walk all the way down the tree if the keys are - // equal, but determining equality would require doing an extra comparison - // on each node on the way down, and we will need to go all the way to the - // leaf node in the expected case. - if (iter.node->leaf()) { - break; - } - iter.node = iter.node->child(iter.position); - } - return {iter}; - } - - template - template - inline auto btree
<P>
::internal_locate_impl( - const K &key, std::true_type /* IsCompareTo */) const - -> SearchResult { - iterator iter(const_cast(root()), 0); - for (;;) { - SearchResult res = iter.node->lower_bound(key, key_comp()); - iter.position = res.value; - if (res.match == MatchKind::kEq) { - return {iter, MatchKind::kEq}; - } - if (iter.node->leaf()) { - break; - } - iter.node = iter.node->child(iter.position); - } - return {iter, MatchKind::kNe}; - } - - template - template - auto btree
<P>
::internal_lower_bound(const K &key) const -> iterator { - iterator iter(const_cast(root()), 0); - for (;;) { - iter.position = iter.node->lower_bound(key, key_comp()).value; - if (iter.node->leaf()) { - break; - } - iter.node = iter.node->child(iter.position); - } - return internal_last(iter); - } - - template - template - auto btree
<P>
::internal_upper_bound(const K &key) const -> iterator { - iterator iter(const_cast(root()), 0); - for (;;) { - iter.position = iter.node->upper_bound(key, key_comp()); - if (iter.node->leaf()) { - break; - } - iter.node = iter.node->child(iter.position); - } - return internal_last(iter); - } - - template - template - auto btree
<P>
::internal_find(const K &key) const -> iterator { - auto res = internal_locate(key); - if (res.HasMatch()) { - if (res.IsEq()) { - return res.value; - } - } else { - const iterator iter = internal_last(res.value); - if (iter.node != nullptr && !compare_keys(key, iter.key())) { - return iter; - } - } - return {nullptr, 0}; - } - - template - void btree
<P>
::internal_clear(node_type *node) { - if (!node->leaf()) { - for (int i = 0; i <= node->count(); ++i) { - internal_clear(node->child(i)); - } - delete_internal_node(node); - } else { - delete_leaf_node(node); - } - } - - template - int btree
<P>
::internal_verify( - const node_type *node, const key_type *lo, const key_type *hi) const { - assert(node->count() > 0); - assert(node->count() <= node->max_count()); - if (lo) { - assert(!compare_keys(node->key(0), *lo)); - } - if (hi) { - assert(!compare_keys(*hi, node->key(node->count() - 1))); - } - for (int i = 1; i < node->count(); ++i) { - assert(!compare_keys(node->key(i), node->key(i - 1))); - } - int count = node->count(); - if (!node->leaf()) { - for (int i = 0; i <= node->count(); ++i) { - assert(node->child(i) != nullptr); - assert(node->child(i)->parent() == node); - assert(node->child(i)->position() == i); - count += internal_verify( - node->child(i), - (i == 0) ? lo : &node->key(i - 1), - (i == node->count()) ? hi : &node->key(i)); - } - } - return count; - } - - // A common base class for btree_set, btree_map, btree_multiset, and btree_multimap. - // --------------------------------------------------------------------------------- - template - class btree_container { - using params_type = typename Tree::params_type; - - protected: - // Alias used for heterogeneous lookup functions. - // `key_arg` evaluates to `K` when the functors are transparent and to - // `key_type` otherwise. It permits template argument deduction on `K` for the - // transparent case. - template - using key_arg = - typename KeyArg::value>:: - template type; - - public: - using key_type = typename Tree::key_type; - using value_type = typename Tree::value_type; - using size_type = typename Tree::size_type; - using difference_type = typename Tree::difference_type; - using key_compare = typename Tree::key_compare; - using value_compare = typename Tree::value_compare; - using allocator_type = typename Tree::allocator_type; - using reference = typename Tree::reference; - using const_reference = typename Tree::const_reference; - using pointer = typename Tree::pointer; - using const_pointer = typename Tree::const_pointer; - using iterator = typename Tree::iterator; - using const_iterator = typename Tree::const_iterator; - using reverse_iterator = typename Tree::reverse_iterator; - using const_reverse_iterator = typename Tree::const_reverse_iterator; - using node_type = typename Tree::node_handle_type; - - // Constructors/assignments. - btree_container() : tree_(key_compare(), allocator_type()) {} - explicit btree_container(const key_compare &comp, - const allocator_type &alloc = allocator_type()) - : tree_(comp, alloc) {} - btree_container(const btree_container &x) = default; - btree_container(btree_container &&x) noexcept = default; - btree_container &operator=(const btree_container &x) = default; - btree_container &operator=(btree_container &&x) noexcept( - std::is_nothrow_move_assignable::value) = default; - - // Iterator routines. - iterator begin() { return tree_.begin(); } - const_iterator begin() const { return tree_.begin(); } - const_iterator cbegin() const { return tree_.begin(); } - iterator end() { return tree_.end(); } - const_iterator end() const { return tree_.end(); } - const_iterator cend() const { return tree_.end(); } - reverse_iterator rbegin() { return tree_.rbegin(); } - const_reverse_iterator rbegin() const { return tree_.rbegin(); } - const_reverse_iterator crbegin() const { return tree_.rbegin(); } - reverse_iterator rend() { return tree_.rend(); } - const_reverse_iterator rend() const { return tree_.rend(); } - const_reverse_iterator crend() const { return tree_.rend(); } - - // Lookup routines. 
- // ---------------- - template - size_type count(const key_arg &key) const { - auto equal_range = this->equal_range(key); - return std::distance(equal_range.first, equal_range.second); - } - template - iterator find(const key_arg &key) { - return tree_.find(key); - } - template - const_iterator find(const key_arg &key) const { return tree_.find(key); } - - template - bool contains(const key_arg &key) const { return find(key) != end(); } - - template - iterator lower_bound(const key_arg &key) { return tree_.lower_bound(key); } - - template - const_iterator lower_bound(const key_arg &key) const { return tree_.lower_bound(key); } - - template - iterator upper_bound(const key_arg &key) { return tree_.upper_bound(key); } - - template - const_iterator upper_bound(const key_arg &key) const { return tree_.upper_bound(key); } - - template - std::pair equal_range(const key_arg &key) { return tree_.equal_range(key); } - - template - std::pair equal_range( - const key_arg &key) const { - return tree_.equal_range(key); - } - - iterator erase(const_iterator iter) { return tree_.erase(iterator(iter)); } - iterator erase(iterator iter) { return tree_.erase(iter); } - iterator erase(const_iterator first, const_iterator last) { - return tree_.erase(iterator(first), iterator(last)).second; - } - template - size_type erase(const key_arg &key) { - auto equal_range = this->equal_range(key); - return tree_.erase_range(equal_range.first, equal_range.second).first; - } - node_type extract(iterator position) { - // Use Move instead of Transfer, because the rebalancing code expects to - // have a valid object to scribble metadata bits on top of. - auto node = CommonAccess::Move(get_allocator(), position.slot()); - erase(position); - return node; - } - - node_type extract(const_iterator position) { - return extract(iterator(position)); - } - - public: - void clear() { tree_.clear(); } - void swap(btree_container &x) { tree_.swap(x.tree_); } - void verify() const { tree_.verify(); } - - size_type size() const { return tree_.size(); } - size_type max_size() const { return tree_.max_size(); } - bool empty() const { return tree_.empty(); } - - friend bool operator==(const btree_container &x, const btree_container &y) { - if (x.size() != y.size()) return false; - return std::equal(x.begin(), x.end(), y.begin()); - } - - friend bool operator!=(const btree_container &x, const btree_container &y) { return !(x == y); } - - friend bool operator<(const btree_container &x, const btree_container &y) { - return std::lexicographical_compare(x.begin(), x.end(), y.begin(), y.end()); - } - - friend bool operator>(const btree_container &x, const btree_container &y) { return y < x; } - - friend bool operator<=(const btree_container &x, const btree_container &y) { return !(y < x); } - - friend bool operator>=(const btree_container &x, const btree_container &y) { return !(x < y); } - - // The allocator used by the btree. - allocator_type get_allocator() const { return tree_.get_allocator(); } - - // The key comparator used by the btree. - key_compare key_comp() const { return tree_.key_comp(); } - value_compare value_comp() const { return tree_.value_comp(); } - - // Support absl::Hash. - template - friend State AbslHashValue(State h, const btree_container &b) { - for (const auto &v : b) { - h = State::combine(std::move(h), v); - } - return State::combine(std::move(h), b.size()); - } - - protected: - Tree tree_; - }; - - // A common base class for btree_set and btree_map. 
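Before the set/map adapters that follow, a short usage sketch of the lookup interface above. It assumes the public `phmap::btree_set` wrapper declared later in this header and a transparent comparator (`std::less<>`), which is what activates the heterogeneous `key_arg` overloads; the keys and values are illustrative only:

```cpp
#include <iostream>
#include <string>

#include "parallel_hashmap/btree.h"

int main() {
    // std::less<> is transparent, so the key_arg machinery lets find(),
    // contains() and count() accept anything comparable with std::string
    // (here a const char*) without materializing a temporary key.
    phmap::btree_set<std::string, std::less<>> s{"apple", "banana", "cherry"};

    std::cout << s.contains("banana") << "\n";  // 1, no std::string temporary
    std::cout << s.count("durian") << "\n";     // 0

    // equal_range() brackets the matching run; for a unique-key set the
    // range has length zero or one.
    auto range = s.equal_range("cherry");
    for (auto it = range.first; it != range.second; ++it)
        std::cout << *it << "\n";               // cherry
}
```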
- // ----------------------------------------------- - template - class btree_set_container : public btree_container { - using super_type = btree_container; - using params_type = typename Tree::params_type; - using init_type = typename params_type::init_type; - using is_key_compare_to = typename params_type::is_key_compare_to; - friend class BtreeNodePeer; - - protected: - template - using key_arg = typename super_type::template key_arg; - - public: - using key_type = typename Tree::key_type; - using value_type = typename Tree::value_type; - using size_type = typename Tree::size_type; - using key_compare = typename Tree::key_compare; - using allocator_type = typename Tree::allocator_type; - using iterator = typename Tree::iterator; - using const_iterator = typename Tree::const_iterator; - using node_type = typename super_type::node_type; - using insert_return_type = InsertReturnType; - using super_type::super_type; - btree_set_container() {} - - template - btree_set_container(InputIterator b, InputIterator e, - const key_compare &comp = key_compare(), - const allocator_type &alloc = allocator_type()) - : super_type(comp, alloc) { - insert(b, e); - } - - btree_set_container(std::initializer_list init, - const key_compare &comp = key_compare(), - const allocator_type &alloc = allocator_type()) - : btree_set_container(init.begin(), init.end(), comp, alloc) {} - - btree_set_container(std::initializer_list init, - const allocator_type &alloc) - : btree_set_container(init.begin(), init.end(), alloc) {} - - // Lookup routines. - template - size_type count(const key_arg &key) const { - return this->tree_.count_unique(key); - } - - // Insertion routines. - std::pair insert(const value_type &x) { - return this->tree_.insert_unique(params_type::key(x), x); - } - std::pair insert(value_type &&x) { - return this->tree_.insert_unique(params_type::key(x), std::move(x)); - } - template - std::pair emplace(Args &&... args) { - init_type v(std::forward(args)...); - return this->tree_.insert_unique(params_type::key(v), std::move(v)); - } - iterator insert(const_iterator hint, const value_type &x) { - return this->tree_ - .insert_hint_unique(iterator(hint), params_type::key(x), x) - .first; - } - iterator insert(const_iterator hint, value_type &&x) { - return this->tree_ - .insert_hint_unique(iterator(hint), params_type::key(x), - std::move(x)) - .first; - } - - template - iterator emplace_hint(const_iterator hint, Args &&... 
args) { - init_type v(std::forward(args)...); - return this->tree_ - .insert_hint_unique(iterator(hint), params_type::key(v), - std::move(v)) - .first; - } - - template - void insert(InputIterator b, InputIterator e) { - this->tree_.insert_iterator_unique(b, e); - } - - void insert(std::initializer_list init) { - this->tree_.insert_iterator_unique(init.begin(), init.end()); - } - - insert_return_type insert(node_type &&node) { - if (!node) return {this->end(), false, node_type()}; - std::pair res = - this->tree_.insert_unique(params_type::key(CommonAccess::GetSlot(node)), - CommonAccess::GetSlot(node)); - if (res.second) { - CommonAccess::Destroy(&node); - return {res.first, true, node_type()}; - } else { - return {res.first, false, std::move(node)}; - } - } - - iterator insert(const_iterator hint, node_type &&node) { - if (!node) return this->end(); - std::pair res = this->tree_.insert_hint_unique( - iterator(hint), params_type::key(CommonAccess::GetSlot(node)), - CommonAccess::GetSlot(node)); - if (res.second) CommonAccess::Destroy(&node); - return res.first; - } - - template - size_type erase(const key_arg &key) { return this->tree_.erase_unique(key); } - using super_type::erase; - - template - node_type extract(const key_arg &key) { - auto it = this->find(key); - return it == this->end() ? node_type() : extract(it); - } - - using super_type::extract; - - // Merge routines. - // Moves elements from `src` into `this`. If the element already exists in - // `this`, it is left unmodified in `src`. - template < - typename T, - typename phmap::enable_if_t< - phmap::conjunction< - std::is_same, - std::is_same, - std::is_same>::value, - int> = 0> - void merge(btree_container &src) { // NOLINT - for (auto src_it = src.begin(); src_it != src.end();) { - if (insert(std::move(*src_it)).second) { - src_it = src.erase(src_it); - } else { - ++src_it; - } - } - } - - template < - typename T, - typename phmap::enable_if_t< - phmap::conjunction< - std::is_same, - std::is_same, - std::is_same>::value, - int> = 0> - void merge(btree_container &&src) { - merge(src); - } - }; - - // Base class for btree_map. - // ------------------------- - template - class btree_map_container : public btree_set_container { - using super_type = btree_set_container; - using params_type = typename Tree::params_type; - - protected: - template - using key_arg = typename super_type::template key_arg; - - public: - using key_type = typename Tree::key_type; - using mapped_type = typename params_type::mapped_type; - using value_type = typename Tree::value_type; - using key_compare = typename Tree::key_compare; - using allocator_type = typename Tree::allocator_type; - using iterator = typename Tree::iterator; - using const_iterator = typename Tree::const_iterator; - - // Inherit constructors. - using super_type::super_type; - btree_map_container() {} - - // Insertion routines. - template - std::pair try_emplace(const key_type &k, Args &&... args) { - return this->tree_.insert_unique( - k, std::piecewise_construct, std::forward_as_tuple(k), - std::forward_as_tuple(std::forward(args)...)); - } - template - std::pair try_emplace(key_type &&k, Args &&... args) { - // Note: `key_ref` exists to avoid a ClangTidy warning about moving from `k` - // and then using `k` unsequenced. This is safe because the move is into a - // forwarding reference and insert_unique guarantees that `key` is never - // referenced after consuming `args`. 
- const key_type& key_ref = k; - return this->tree_.insert_unique( - key_ref, std::piecewise_construct, std::forward_as_tuple(std::move(k)), - std::forward_as_tuple(std::forward(args)...)); - } - template - iterator try_emplace(const_iterator hint, const key_type &k, - Args &&... args) { - return this->tree_ - .insert_hint_unique(iterator(hint), k, std::piecewise_construct, - std::forward_as_tuple(k), - std::forward_as_tuple(std::forward(args)...)) - .first; - } - template - iterator try_emplace(const_iterator hint, key_type &&k, Args &&... args) { - // Note: `key_ref` exists to avoid a ClangTidy warning about moving from `k` - // and then using `k` unsequenced. This is safe because the move is into a - // forwarding reference and insert_hint_unique guarantees that `key` is - // never referenced after consuming `args`. - const key_type& key_ref = k; - return this->tree_ - .insert_hint_unique(iterator(hint), key_ref, std::piecewise_construct, - std::forward_as_tuple(std::move(k)), - std::forward_as_tuple(std::forward(args)...)) - .first; - } - mapped_type &operator[](const key_type &k) { - return try_emplace(k).first->second; - } - mapped_type &operator[](key_type &&k) { - return try_emplace(std::move(k)).first->second; - } - - template - mapped_type &at(const key_arg &key) { - auto it = this->find(key); - if (it == this->end()) - base_internal::ThrowStdOutOfRange("phmap::btree_map::at"); - return it->second; - } - template - const mapped_type &at(const key_arg &key) const { - auto it = this->find(key); - if (it == this->end()) - base_internal::ThrowStdOutOfRange("phmap::btree_map::at"); - return it->second; - } - }; - - // A common base class for btree_multiset and btree_multimap. - template - class btree_multiset_container : public btree_container { - using super_type = btree_container; - using params_type = typename Tree::params_type; - using init_type = typename params_type::init_type; - using is_key_compare_to = typename params_type::is_key_compare_to; - - template - using key_arg = typename super_type::template key_arg; - - public: - using key_type = typename Tree::key_type; - using value_type = typename Tree::value_type; - using size_type = typename Tree::size_type; - using key_compare = typename Tree::key_compare; - using allocator_type = typename Tree::allocator_type; - using iterator = typename Tree::iterator; - using const_iterator = typename Tree::const_iterator; - using node_type = typename super_type::node_type; - - // Inherit constructors. - using super_type::super_type; - btree_multiset_container() {} - - // Range constructor. - template - btree_multiset_container(InputIterator b, InputIterator e, - const key_compare &comp = key_compare(), - const allocator_type &alloc = allocator_type()) - : super_type(comp, alloc) { - insert(b, e); - } - - // Initializer list constructor. - btree_multiset_container(std::initializer_list init, - const key_compare &comp = key_compare(), - const allocator_type &alloc = allocator_type()) - : btree_multiset_container(init.begin(), init.end(), comp, alloc) {} - - // Lookup routines. - template - size_type count(const key_arg &key) const { - return this->tree_.count_multi(key); - } - - // Insertion routines. 
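A note on the `btree_map_container` insertion API above, before the multiset insertion routines that follow: `try_emplace` constructs the mapped value only when the key is absent, `operator[]` is `try_emplace(k)` plus a dereference, and `at` throws instead of inserting. A minimal sketch, assuming the public `phmap::btree_map` wrapper defined later in this header:

```cpp
#include <iostream>
#include <stdexcept>
#include <string>

#include "parallel_hashmap/btree.h"

int main() {
    phmap::btree_map<std::string, int> m;

    // try_emplace inserts {"a", 1} on the first call...
    auto [it1, inserted1] = m.try_emplace("a", 1);
    // ...and leaves the existing value untouched on the second.
    auto [it2, inserted2] = m.try_emplace("a", 99);
    std::cout << inserted1 << " " << inserted2 << " " << it2->second << "\n";  // 1 0 1

    // operator[] is try_emplace(k) plus a dereference: it value-initializes
    // the mapped type (int -> 0) when the key is new.
    std::cout << m["b"] << "\n";  // 0

    // at() throws std::out_of_range for a missing key instead of inserting.
    try {
        m.at("missing");
    } catch (const std::out_of_range&) {
        std::cout << "no such key\n";
    }
}
```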
- iterator insert(const value_type &x) { return this->tree_.insert_multi(x); } - iterator insert(value_type &&x) { - return this->tree_.insert_multi(std::move(x)); - } - iterator insert(const_iterator hint, const value_type &x) { - return this->tree_.insert_hint_multi(iterator(hint), x); - } - iterator insert(const_iterator hint, value_type &&x) { - return this->tree_.insert_hint_multi(iterator(hint), std::move(x)); - } - template - void insert(InputIterator b, InputIterator e) { - this->tree_.insert_iterator_multi(b, e); - } - void insert(std::initializer_list init) { - this->tree_.insert_iterator_multi(init.begin(), init.end()); - } - template - iterator emplace(Args &&... args) { - return this->tree_.insert_multi(init_type(std::forward(args)...)); - } - template - iterator emplace_hint(const_iterator hint, Args &&... args) { - return this->tree_.insert_hint_multi( - iterator(hint), init_type(std::forward(args)...)); - } - iterator insert(node_type &&node) { - if (!node) return this->end(); - iterator res = - this->tree_.insert_multi(params_type::key(CommonAccess::GetSlot(node)), - CommonAccess::GetSlot(node)); - CommonAccess::Destroy(&node); - return res; - } - iterator insert(const_iterator hint, node_type &&node) { - if (!node) return this->end(); - iterator res = this->tree_.insert_hint_multi( - iterator(hint), - std::move(params_type::element(CommonAccess::GetSlot(node)))); - CommonAccess::Destroy(&node); - return res; - } - - // Deletion routines. - template - size_type erase(const key_arg &key) { - return this->tree_.erase_multi(key); - } - using super_type::erase; - - // Node extraction routines. - template - node_type extract(const key_arg &key) { - auto it = this->find(key); - return it == this->end() ? node_type() : extract(it); - } - using super_type::extract; - - // Merge routines. - // Moves all elements from `src` into `this`. - template < - typename T, - typename phmap::enable_if_t< - phmap::conjunction< - std::is_same, - std::is_same, - std::is_same>::value, - int> = 0> - void merge(btree_container &src) { // NOLINT - insert(std::make_move_iterator(src.begin()), - std::make_move_iterator(src.end())); - src.clear(); - } - - template < - typename T, - typename phmap::enable_if_t< - phmap::conjunction< - std::is_same, - std::is_same, - std::is_same>::value, - int> = 0> - void merge(btree_container &&src) { - merge(src); - } - }; - - // A base class for btree_multimap. - template - class btree_multimap_container : public btree_multiset_container { - using super_type = btree_multiset_container; - using params_type = typename Tree::params_type; - - public: - using mapped_type = typename params_type::mapped_type; - - // Inherit constructors. 
- using super_type::super_type; - btree_multimap_container() {} - }; - -} // namespace priv - - - - // ---------------------------------------------------------------------- - // btree_set - default values in phmap_fwd_decl.h - // ---------------------------------------------------------------------- - template - class btree_set : public priv::btree_set_container< - priv::btree>> - { - using Base = typename btree_set::btree_set_container; - - public: - btree_set() {} - using Base::Base; - using Base::begin; - using Base::cbegin; - using Base::end; - using Base::cend; - using Base::empty; - using Base::max_size; - using Base::size; - using Base::clear; - using Base::erase; - using Base::insert; - using Base::emplace; - using Base::emplace_hint; - using Base::extract; - using Base::merge; - using Base::swap; - using Base::contains; - using Base::count; - using Base::equal_range; - using Base::lower_bound; - using Base::upper_bound; - using Base::find; - using Base::get_allocator; - using Base::key_comp; - using Base::value_comp; - }; - - // Swaps the contents of two `phmap::btree_set` containers. - // ------------------------------------------------------- - template - void swap(btree_set &x, btree_set &y) { - return x.swap(y); - } - - // Erases all elements that satisfy the predicate pred from the container. - // ---------------------------------------------------------------------- - template - void erase_if(btree_set &set, Pred pred) { - for (auto it = set.begin(); it != set.end();) { - if (pred(*it)) { - it = set.erase(it); - } else { - ++it; - } - } - } - - // ---------------------------------------------------------------------- - // btree_multiset - default values in phmap_fwd_decl.h - // ---------------------------------------------------------------------- - template - class btree_multiset : public priv::btree_multiset_container< - priv::btree>> - { - using Base = typename btree_multiset::btree_multiset_container; - - public: - btree_multiset() {} - using Base::Base; - using Base::begin; - using Base::cbegin; - using Base::end; - using Base::cend; - using Base::empty; - using Base::max_size; - using Base::size; - using Base::clear; - using Base::erase; - using Base::insert; - using Base::emplace; - using Base::emplace_hint; - using Base::extract; - using Base::merge; - using Base::swap; - using Base::contains; - using Base::count; - using Base::equal_range; - using Base::lower_bound; - using Base::upper_bound; - using Base::find; - using Base::get_allocator; - using Base::key_comp; - using Base::value_comp; - }; - - // Swaps the contents of two `phmap::btree_multiset` containers. - // ------------------------------------------------------------ - template - void swap(btree_multiset &x, btree_multiset &y) { - return x.swap(y); - } - - // Erases all elements that satisfy the predicate pred from the container. 
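The `erase_if` overloads (the `btree_multiset` one follows this sketch) all wrap the same iterator-safe loop: on a match the iterator is replaced by the return value of `erase`, otherwise it is incremented, so removal never invalidates the traversal. A minimal usage sketch, assuming these free functions live in namespace `phmap` as in the upstream library:

```cpp
#include <iostream>

#include "parallel_hashmap/btree.h"

int main() {
    phmap::btree_set<int> s{1, 2, 3, 4, 5, 6};

    // Remove every even element. Internally this is the canonical loop:
    //   it = set.erase(it) on a match, ++it otherwise.
    phmap::erase_if(s, [](int v) { return v % 2 == 0; });

    for (int v : s) std::cout << v << ' ';  // 1 3 5
    std::cout << '\n';
}
```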
- // ---------------------------------------------------------------------- - template - void erase_if(btree_multiset &set, Pred pred) { - for (auto it = set.begin(); it != set.end();) { - if (pred(*it)) { - it = set.erase(it); - } else { - ++it; - } - } - } - - - // ---------------------------------------------------------------------- - // btree_map - default values in phmap_fwd_decl.h - // ---------------------------------------------------------------------- - template - class btree_map : public priv::btree_map_container< - priv::btree>> - { - using Base = typename btree_map::btree_map_container; - - public: - btree_map() {} - using Base::Base; - using Base::begin; - using Base::cbegin; - using Base::end; - using Base::cend; - using Base::empty; - using Base::max_size; - using Base::size; - using Base::clear; - using Base::erase; - using Base::insert; - using Base::emplace; - using Base::emplace_hint; - using Base::try_emplace; - using Base::extract; - using Base::merge; - using Base::swap; - using Base::at; - using Base::contains; - using Base::count; - using Base::equal_range; - using Base::lower_bound; - using Base::upper_bound; - using Base::find; - using Base::operator[]; - using Base::get_allocator; - using Base::key_comp; - using Base::value_comp; - }; - - // Swaps the contents of two `phmap::btree_map` containers. - // ------------------------------------------------------- - template - void swap(btree_map &x, btree_map &y) { - return x.swap(y); - } - - // ---------------------------------------------------------------------- - template - void erase_if(btree_map &map, Pred pred) { - for (auto it = map.begin(); it != map.end();) { - if (pred(*it)) { - it = map.erase(it); - } else { - ++it; - } - } - } - - // ---------------------------------------------------------------------- - // btree_multimap - default values in phmap_fwd_decl.h - // ---------------------------------------------------------------------- - template - class btree_multimap : public priv::btree_multimap_container< - priv::btree>> - { - using Base = typename btree_multimap::btree_multimap_container; - - public: - btree_multimap() {} - using Base::Base; - using Base::begin; - using Base::cbegin; - using Base::end; - using Base::cend; - using Base::empty; - using Base::max_size; - using Base::size; - using Base::clear; - using Base::erase; - using Base::insert; - using Base::emplace; - using Base::emplace_hint; - using Base::extract; - using Base::merge; - using Base::swap; - using Base::contains; - using Base::count; - using Base::equal_range; - using Base::lower_bound; - using Base::upper_bound; - using Base::find; - using Base::get_allocator; - using Base::key_comp; - using Base::value_comp; - }; - - // Swaps the contents of two `phmap::btree_multimap` containers. - // ------------------------------------------------------------ - template - void swap(btree_multimap &x, btree_multimap &y) { - return x.swap(y); - } - - // Erases all elements that satisfy the predicate pred from the container. 
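Because the btree adapters keep keys in comparator order, `lower_bound`/`upper_bound` support range scans that the hash-based containers in this library cannot offer. A sketch with illustrative keys, before the final `erase_if` overload below:

```cpp
#include <iostream>

#include "parallel_hashmap/btree.h"

int main() {
    phmap::btree_map<int, const char*> m{
        {10, "ten"}, {20, "twenty"}, {30, "thirty"}, {40, "forty"}};

    // Scan all keys in [15, 35): lower_bound returns the first element
    // whose key is not less than the argument.
    const auto last = m.lower_bound(35);
    for (auto it = m.lower_bound(15); it != last; ++it)
        std::cout << it->first << " -> " << it->second << "\n";
    // prints: 20 -> twenty
    //         30 -> thirty
}
```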
- // ---------------------------------------------------------------------- - template - void erase_if(btree_multimap &map, Pred pred) { - for (auto it = map.begin(); it != map.end();) { - if (pred(*it)) { - it = map.erase(it); - } else { - ++it; - } - } - } - - -} // namespace btree - -#ifdef _MSC_VER - #pragma warning(pop) -#endif - - -#endif // PHMAP_BTREE_BTREE_CONTAINER_H_ diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/parallel_hashmap/conanfile.py b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/parallel_hashmap/conanfile.py deleted file mode 100644 index f3f4df9..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/parallel_hashmap/conanfile.py +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -from conans import ConanFile, tools -import os - -class SparseppConan(ConanFile): - name = "parallel_hashmap" - version = "1.34" - description = "A header-only, very fast and memory-friendly hash map" - url = "https://github.com/greg7mdp/parallel-hashmap/blob/master/parallel_hashmap/conanfile.py" - - # Indicates License type of the packaged library - license = "https://github.com/greg7mdp/parallel-hashmap/blob/master/LICENSE" - - # Packages the license for the conanfile.py - exports = ["LICENSE"] - - # Custom attributes for Bincrafters recipe conventions - source_subfolder = "source_subfolder" - - def source(self): - source_url = "https://github.com/greg7mdp/parallel-hashmap" - tools.get("{0}/archive/{1}.tar.gz".format(source_url, self.version)) - extracted_dir = self.name + "-" + self.version - - #Rename to "source_folder" is a convention to simplify later steps - os.rename(extracted_dir, self.source_subfolder) - - - def package(self): - include_folder = os.path.join(self.source_subfolder, "parallel_hashmap") - self.copy(pattern="LICENSE") - self.copy(pattern="*", dst="include/parallel_hashmap", src=include_folder) - - def package_id(self): - self.info.header_only() diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/parallel_hashmap/meminfo.h b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/parallel_hashmap/meminfo.h deleted file mode 100644 index 872f3c6..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/parallel_hashmap/meminfo.h +++ /dev/null @@ -1,195 +0,0 @@ -#if !defined(spp_memory_h_guard) -#define spp_memory_h_guard - -#include -#include -#include - -#if defined(_WIN32) || defined( __CYGWIN__) - #define SPP_WIN -#endif - -#ifdef SPP_WIN - #include - #include - #undef min - #undef max -#elif defined(__linux__) - #include - #include -#elif defined(__FreeBSD__) - #include - #include - #include - #include - #include - #include -#endif - -namespace spp -{ - uint64_t GetSystemMemory(); - uint64_t GetTotalMemoryUsed(); - uint64_t GetProcessMemoryUsed(); - uint64_t GetPhysicalMemory(); - - uint64_t GetSystemMemory() - { -#ifdef SPP_WIN - MEMORYSTATUSEX memInfo; - memInfo.dwLength = sizeof(MEMORYSTATUSEX); - GlobalMemoryStatusEx(&memInfo); - return static_cast(memInfo.ullTotalPageFile); -#elif defined(__linux__) - struct sysinfo memInfo; - sysinfo (&memInfo); - auto totalVirtualMem = memInfo.totalram; - - totalVirtualMem += memInfo.totalswap; - totalVirtualMem *= memInfo.mem_unit; - return static_cast(totalVirtualMem); -#elif defined(__FreeBSD__) - kvm_t *kd; - u_int pageCnt; - size_t pageCntLen = sizeof(pageCnt); - u_int pageSize; - struct kvm_swap kswap; - uint64_t totalVirtualMem; - - pageSize = static_cast(getpagesize()); - - sysctlbyname("vm.stats.vm.v_page_count", &pageCnt, &pageCntLen, NULL, 
0); - totalVirtualMem = pageCnt * pageSize; - - kd = kvm_open(NULL, _PATH_DEVNULL, NULL, O_RDONLY, "kvm_open"); - kvm_getswapinfo(kd, &kswap, 1, 0); - kvm_close(kd); - totalVirtualMem += kswap.ksw_total * pageSize; - - return totalVirtualMem; -#else - return 0; -#endif - } - - uint64_t GetTotalMemoryUsed() - { -#ifdef SPP_WIN - MEMORYSTATUSEX memInfo; - memInfo.dwLength = sizeof(MEMORYSTATUSEX); - GlobalMemoryStatusEx(&memInfo); - return static_cast(memInfo.ullTotalPageFile - memInfo.ullAvailPageFile); -#elif defined(__linux__) - struct sysinfo memInfo; - sysinfo(&memInfo); - auto virtualMemUsed = memInfo.totalram - memInfo.freeram; - - virtualMemUsed += memInfo.totalswap - memInfo.freeswap; - virtualMemUsed *= memInfo.mem_unit; - - return static_cast(virtualMemUsed); -#elif defined(__FreeBSD__) - kvm_t *kd; - u_int pageSize; - u_int pageCnt, freeCnt; - size_t pageCntLen = sizeof(pageCnt); - size_t freeCntLen = sizeof(freeCnt); - struct kvm_swap kswap; - uint64_t virtualMemUsed; - - pageSize = static_cast(getpagesize()); - - sysctlbyname("vm.stats.vm.v_page_count", &pageCnt, &pageCntLen, NULL, 0); - sysctlbyname("vm.stats.vm.v_free_count", &freeCnt, &freeCntLen, NULL, 0); - virtualMemUsed = (pageCnt - freeCnt) * pageSize; - - kd = kvm_open(NULL, _PATH_DEVNULL, NULL, O_RDONLY, "kvm_open"); - kvm_getswapinfo(kd, &kswap, 1, 0); - kvm_close(kd); - virtualMemUsed += kswap.ksw_used * pageSize; - - return virtualMemUsed; -#else - return 0; -#endif - } - - uint64_t GetProcessMemoryUsed() - { -#ifdef SPP_WIN - PROCESS_MEMORY_COUNTERS_EX pmc; - GetProcessMemoryInfo(GetCurrentProcess(), reinterpret_cast(&pmc), sizeof(pmc)); - return static_cast(pmc.PrivateUsage); -#elif defined(__linux__) - auto parseLine = - [](char* line)->int - { - auto i = strlen(line); - - while(*line < '0' || *line > '9') - { - line++; - } - - line[i-3] = '\0'; - i = atoi(line); - return i; - }; - - auto file = fopen("/proc/self/status", "r"); - auto result = -1; - char line[128]; - - while(fgets(line, 128, file) != nullptr) - { - if(strncmp(line, "VmSize:", 7) == 0) - { - result = parseLine(line); - break; - } - } - - fclose(file); - return static_cast(result) * 1024; -#elif defined(__FreeBSD__) - struct kinfo_proc info; - size_t infoLen = sizeof(info); - int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, getpid() }; - - sysctl(mib, sizeof(mib) / sizeof(*mib), &info, &infoLen, NULL, 0); - return static_cast(info.ki_rssize * getpagesize()); -#else - return 0; -#endif - } - - uint64_t GetPhysicalMemory() - { -#ifdef SPP_WIN - MEMORYSTATUSEX memInfo; - memInfo.dwLength = sizeof(MEMORYSTATUSEX); - GlobalMemoryStatusEx(&memInfo); - return static_cast(memInfo.ullTotalPhys); -#elif defined(__linux__) - struct sysinfo memInfo; - sysinfo(&memInfo); - - auto totalPhysMem = memInfo.totalram; - - totalPhysMem *= memInfo.mem_unit; - return static_cast(totalPhysMem); -#elif defined(__FreeBSD__) - u_long physMem; - size_t physMemLen = sizeof(physMem); - int mib[] = { CTL_HW, HW_PHYSMEM }; - - sysctl(mib, sizeof(mib) / sizeof(*mib), &physMem, &physMemLen, NULL, 0); - return physMem; -#else - return 0; -#endif - } - -} - -#endif // spp_memory_h_guard diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/parallel_hashmap/phmap.h b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/parallel_hashmap/phmap.h deleted file mode 100644 index 4e357c7..0000000 --- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/parallel_hashmap/phmap.h +++ /dev/null @@ -1,5054 +0,0 @@ -#if !defined(phmap_h_guard_) -#define phmap_h_guard_ - -// 
--------------------------------------------------------------------------- -// Copyright (c) 2019, Gregory Popovitch - greg7mdp@gmail.com -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Includes work from abseil-cpp (https://github.com/abseil/abseil-cpp) -// with modifications. -// -// Copyright 2018 The Abseil Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// --------------------------------------------------------------------------- - -// --------------------------------------------------------------------------- -// IMPLEMENTATION DETAILS -// -// The table stores elements inline in a slot array. In addition to the slot -// array the table maintains some control state per slot. The extra state is one -// byte per slot and stores empty or deleted marks, or alternatively 7 bits from -// the hash of an occupied slot. The table is split into logical groups of -// slots, like so: -// -// Group 1 Group 2 Group 3 -// +---------------+---------------+---------------+ -// | | | | | | | | | | | | | | | | | | | | | | | | | -// +---------------+---------------+---------------+ -// -// On lookup the hash is split into two parts: -// - H2: 7 bits (those stored in the control bytes) -// - H1: the rest of the bits -// The groups are probed using H1. For each group the slots are matched to H2 in -// parallel. Because H2 is 7 bits (128 states) and the number of slots per group -// is low (8 or 16) in almost all cases a match in H2 is also a lookup hit. -// -// On insert, once the right group is found (as in lookup), its slots are -// filled in order. -// -// On erase a slot is cleared. In case the group did not have any empty slots -// before the erase, the erased slot is marked as deleted. -// -// Groups without empty slots (but maybe with deleted slots) extend the probe -// sequence. The probing algorithm is quadratic. Given N the number of groups, -// the probing function for the i'th probe is: -// -// P(0) = H1 % N -// -// P(i) = (P(i - 1) + i) % N -// -// This probing function guarantees that after N probes, all the groups of the -// table will be probed exactly once. -// -// The control state and slot array are stored contiguously in a shared heap -// allocation. The layout of this allocation is: `capacity()` control bytes, -// one sentinel control byte, `Group::kWidth - 1` cloned control bytes, -// , `capacity()` slots. The sentinel control byte is used in -// iteration so we know when we reach the end of the table. 
The cloned control -// bytes at the end of the table are cloned from the beginning of the table so -// groups that begin near the end of the table can see a full group. In cases in -// which there are more than `capacity()` cloned control bytes, the extra bytes -// are `kEmpty`, and these ensure that we always see at least one empty slot and -// can stop an unsuccessful search. -// --------------------------------------------------------------------------- - - - -#ifdef _MSC_VER - #pragma warning(push) - - #pragma warning(disable : 4127) // conditional expression is constant - #pragma warning(disable : 4324) // structure was padded due to alignment specifier - #pragma warning(disable : 4514) // unreferenced inline function has been removed - #pragma warning(disable : 4623) // default constructor was implicitly defined as deleted - #pragma warning(disable : 4625) // copy constructor was implicitly defined as deleted - #pragma warning(disable : 4626) // assignment operator was implicitly defined as deleted - #pragma warning(disable : 4710) // function not inlined - #pragma warning(disable : 4711) // selected for automatic inline expansion - #pragma warning(disable : 4820) // '6' bytes padding added after data member - #pragma warning(disable : 4868) // compiler may not enforce left-to-right evaluation order in braced initializer list - #pragma warning(disable : 5027) // move assignment operator was implicitly defined as deleted - #pragma warning(disable : 5045) // Compiler will insert Spectre mitigation for memory load if /Qspectre switch specified -#endif - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "phmap_fwd_decl.h" -#include "phmap_utils.h" -#include "phmap_base.h" - -#if PHMAP_HAVE_STD_STRING_VIEW - #include -#endif - -namespace phmap { - -namespace priv { - -// -------------------------------------------------------------------------- -template -void SwapAlloc(AllocType& lhs, AllocType& rhs, - std::true_type /* propagate_on_container_swap */) { - using std::swap; - swap(lhs, rhs); -} -template -void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/, - std::false_type /* propagate_on_container_swap */) {} - -// -------------------------------------------------------------------------- -template -class probe_seq -{ -public: - probe_seq(size_t hashval, size_t mask) { - assert(((mask + 1) & mask) == 0 && "not a mask"); - mask_ = mask; - offset_ = hashval & mask_; - } - size_t offset() const { return offset_; } - size_t offset(size_t i) const { return (offset_ + i) & mask_; } - - void next() { - index_ += Width; - offset_ += index_; - offset_ &= mask_; - } - // 0-based probe index. The i-th probe in the probe sequence. - size_t getindex() const { return index_; } - -private: - size_t mask_; - size_t offset_; - size_t index_ = 0; -}; - -// -------------------------------------------------------------------------- -template -struct RequireUsableKey -{ - template - std::pair< - decltype(std::declval()(std::declval())), - decltype(std::declval()(std::declval(), - std::declval()))>* - operator()(const PassedKey&, const Args&...) const; -}; - -// -------------------------------------------------------------------------- -template -struct IsDecomposable : std::false_type {}; - -template -struct IsDecomposable< - phmap::void_t(), - std::declval()...))>, - Policy, Hash, Eq, Ts...> : std::true_type {}; - -// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it. 
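Returning to `probe_seq` above: it implements the quadratic probing described in the design comment, advancing the offset by one extra group width per step so that with a power-of-two number of groups each group is visited exactly once per cycle. A standalone re-implementation (not the library class) that prints one full probe cycle:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdio>

// Minimal copy of probe_seq<Width> for illustration only: offset_ walks
// the table in strides that grow by Width each step, wrapped by a mask
// that must be a power of two minus one.
template <std::size_t Width>
struct ProbeSeq {
    ProbeSeq(std::size_t hashval, std::size_t mask)
        : mask_(mask), offset_(hashval & mask) {
        assert(((mask + 1) & mask) == 0 && "not a mask");
    }
    std::size_t offset() const { return offset_; }
    void next() {
        index_ += Width;
        offset_ = (offset_ + index_) & mask_;
    }
    std::size_t mask_, offset_, index_ = 0;
};

int main() {
    // Eight "groups" of width 1 => mask 7. Starting at offset 3 the probe
    // sequence is 3 4 6 1 5 2 0 7: every slot exactly once per cycle.
    ProbeSeq<1> seq(/*hashval=*/3, /*mask=*/7);
    for (int i = 0; i < 8; ++i) {
        std::printf("%zu ", seq.offset());
        seq.next();
    }
    std::printf("\n");
}
```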
-// -------------------------------------------------------------------------- -template -constexpr bool IsNoThrowSwappable() { - using std::swap; - return noexcept(swap(std::declval(), std::declval())); -} - -// -------------------------------------------------------------------------- -template -int TrailingZeros(T x) { - PHMAP_IF_CONSTEXPR(sizeof(T) == 8) - return base_internal::CountTrailingZerosNonZero64(static_cast(x)); - else - return base_internal::CountTrailingZerosNonZero32(static_cast(x)); -} - -// -------------------------------------------------------------------------- -template -int LeadingZeros(T x) { - PHMAP_IF_CONSTEXPR(sizeof(T) == 8) - return base_internal::CountLeadingZeros64(static_cast(x)); - else - return base_internal::CountLeadingZeros32(static_cast(x)); -} - -// -------------------------------------------------------------------------- -// An abstraction over a bitmask. It provides an easy way to iterate through the -// indexes of the set bits of a bitmask. When Shift=0 (platforms with SSE), -// this is a true bitmask. On non-SSE, platforms the arithematic used to -// emulate the SSE behavior works in bytes (Shift=3) and leaves each bytes as -// either 0x00 or 0x80. -// -// For example: -// for (int i : BitMask(0x5)) -> yields 0, 2 -// for (int i : BitMask(0x0000000080800000)) -> yields 2, 3 -// -------------------------------------------------------------------------- -template -class BitMask -{ - static_assert(std::is_unsigned::value, ""); - static_assert(Shift == 0 || Shift == 3, ""); - -public: - // These are useful for unit tests (gunit). - using value_type = int; - using iterator = BitMask; - using const_iterator = BitMask; - - explicit BitMask(T mask) : mask_(mask) {} - - BitMask& operator++() { // ++iterator - mask_ &= (mask_ - 1); // clear the least significant bit set - return *this; - } - - explicit operator bool() const { return mask_ != 0; } - uint32_t operator*() const { return LowestBitSet(); } - - uint32_t LowestBitSet() const { - return priv::TrailingZeros(mask_) >> Shift; - } - - uint32_t HighestBitSet() const { - return (sizeof(T) * CHAR_BIT - priv::LeadingZeros(mask_) - 1) >> Shift; - } - - BitMask begin() const { return *this; } - BitMask end() const { return BitMask(0); } - - uint32_t TrailingZeros() const { - return priv::TrailingZeros(mask_) >> Shift; - } - - uint32_t LeadingZeros() const { - constexpr uint32_t total_significant_bits = SignificantBits << Shift; - constexpr uint32_t extra_bits = sizeof(T) * 8 - total_significant_bits; - return priv::LeadingZeros(mask_ << extra_bits) >> Shift; - } - -private: - friend bool operator==(const BitMask& a, const BitMask& b) { - return a.mask_ == b.mask_; - } - friend bool operator!=(const BitMask& a, const BitMask& b) { - return a.mask_ != b.mask_; - } - - T mask_; -}; - -// -------------------------------------------------------------------------- -using ctrl_t = signed char; -using h2_t = uint8_t; - -// -------------------------------------------------------------------------- -// The values here are selected for maximum performance. See the static asserts -// below for details. 
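Before the control-byte constants that follow, the `BitMask` abstraction above is easiest to see with the documented example: a mask of `0x5` yields indexes 0 and 2. A standalone sketch of the same mechanism, using a GCC/Clang builtin in place of the library's `TrailingZeros`:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
    // Same mechanism as BitMask: operator* reports the index of the lowest
    // set bit, operator++ clears that bit with mask &= (mask - 1).
    // __builtin_ctz (GCC/Clang) stands in for the library's TrailingZeros.
    std::uint32_t mask = 0x5;  // bits 0 and 2 set
    while (mask != 0) {
        std::printf("%d ", __builtin_ctz(mask));  // prints: 0 2
        mask &= (mask - 1);
    }
    std::printf("\n");
}
```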
-// -------------------------------------------------------------------------- -enum Ctrl : ctrl_t -{ - kEmpty = -128, // 0b10000000 or 0x80 - kDeleted = -2, // 0b11111110 or 0xfe - kSentinel = -1, // 0b11111111 or 0xff -}; - -static_assert( - kEmpty & kDeleted & kSentinel & 0x80, - "Special markers need to have the MSB to make checking for them efficient"); -static_assert(kEmpty < kSentinel && kDeleted < kSentinel, - "kEmpty and kDeleted must be smaller than kSentinel to make the " - "SIMD test of IsEmptyOrDeleted() efficient"); -static_assert(kSentinel == -1, - "kSentinel must be -1 to elide loading it from memory into SIMD " - "registers (pcmpeqd xmm, xmm)"); -static_assert(kEmpty == -128, - "kEmpty must be -128 to make the SIMD check for its " - "existence efficient (psignb xmm, xmm)"); -static_assert(~kEmpty & ~kDeleted & kSentinel & 0x7F, - "kEmpty and kDeleted must share an unset bit that is not shared " - "by kSentinel to make the scalar test for MatchEmptyOrDeleted() " - "efficient"); -static_assert(kDeleted == -2, - "kDeleted must be -2 to make the implementation of " - "ConvertSpecialToEmptyAndFullToDeleted efficient"); - -// -------------------------------------------------------------------------- -// A single block of empty control bytes for tables without any slots allocated. -// This enables removing a branch in the hot path of find(). -// -------------------------------------------------------------------------- -inline ctrl_t* EmptyGroup() { - alignas(16) static constexpr ctrl_t empty_group[] = { - kSentinel, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, - kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty}; - return const_cast(empty_group); -} - -// -------------------------------------------------------------------------- -inline size_t HashSeed(const ctrl_t* ctrl) { - // The low bits of the pointer have little or no entropy because of - // alignment. We shift the pointer to try to use higher entropy bits. A - // good number seems to be 12 bits, because that aligns with page size. - return reinterpret_cast(ctrl) >> 12; -} - -#ifdef PHMAP_NON_DETERMINISTIC - -inline size_t H1(size_t hashval, const ctrl_t* ctrl) { - // use ctrl_ pointer to add entropy to ensure - // non-deterministic iteration order. - return (hashval >> 7) ^ HashSeed(ctrl); -} - -#else - -inline size_t H1(size_t hashval, const ctrl_t* ) { - return (hashval >> 7); -} - -#endif - - -inline h2_t H2(size_t hashval) { return (ctrl_t)(hashval & 0x7F); } - -inline bool IsEmpty(ctrl_t c) { return c == kEmpty; } -inline bool IsFull(ctrl_t c) { return c >= static_cast(0); } -inline bool IsDeleted(ctrl_t c) { return c == kDeleted; } -inline bool IsEmptyOrDeleted(ctrl_t c) { return c < kSentinel; } - -#if PHMAP_HAVE_SSE2 - -#ifdef _MSC_VER - #pragma warning(push) - #pragma warning(disable : 4365) // conversion from 'int' to 'T', signed/unsigned mismatch -#endif - -// -------------------------------------------------------------------------- -// https://github.com/abseil/abseil-cpp/issues/209 -// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853 -// _mm_cmpgt_epi8 is broken under GCC with -funsigned-char -// Work around this by using the portable implementation of Group -// when using -funsigned-char under GCC. 
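An aside on the `H1`/`H2` split defined above, before the SSE workaround that follows: `H2` is the low 7 bits of the hash and is what a full control byte stores, while `H1` is the remaining bits and selects the starting probe group. A sketch of the arithmetic with an illustrative hash value:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
    std::uint64_t hashval = 0x123456789ABCDEF0ULL;  // illustrative hash

    std::uint64_t h1 = hashval >> 7;    // selects the starting probe group
    std::uint8_t  h2 = hashval & 0x7F;  // 7 bits stored in the control byte

    // A full control byte is 0b0xxxxxxx (non-negative), so comparing the
    // stored byte against h2 simultaneously checks "full" and prefilters
    // 7 bits of the hash before any key comparison.
    std::printf("H1 = 0x%llx, H2 = 0x%x\n",
                (unsigned long long)h1, (unsigned)h2);
}
```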
-// -------------------------------------------------------------------------- -inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) { -#if defined(__GNUC__) && !defined(__clang__) - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Woverflow" - - if (std::is_unsigned::value) { - const __m128i mask = _mm_set1_epi8(static_cast(0x80)); - const __m128i diff = _mm_subs_epi8(b, a); - return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask); - } - - #pragma GCC diagnostic pop -#endif - return _mm_cmpgt_epi8(a, b); -} - -// -------------------------------------------------------------------------- -// -------------------------------------------------------------------------- -struct GroupSse2Impl -{ - enum { kWidth = 16 }; // the number of slots per group - - explicit GroupSse2Impl(const ctrl_t* pos) { - ctrl = _mm_loadu_si128(reinterpret_cast(pos)); - } - - // Returns a bitmask representing the positions of slots that match hash. - // ---------------------------------------------------------------------- - BitMask Match(h2_t hash) const { - auto match = _mm_set1_epi8((char)hash); - return BitMask( - static_cast(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)))); - } - - // Returns a bitmask representing the positions of empty slots. - // ------------------------------------------------------------ - BitMask MatchEmpty() const { -#if PHMAP_HAVE_SSSE3 - // This only works because kEmpty is -128. - return BitMask( - static_cast(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)))); -#else - return Match(static_cast(kEmpty)); -#endif - } - -#ifdef __INTEL_COMPILER -#pragma warning push -#pragma warning disable 68 -#endif - // Returns a bitmask representing the positions of empty or deleted slots. - // ----------------------------------------------------------------------- - BitMask MatchEmptyOrDeleted() const { - auto special = _mm_set1_epi8(static_cast(kSentinel)); - return BitMask( - static_cast(_mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)))); - } - - // Returns the number of trailing empty or deleted elements in the group. - // ---------------------------------------------------------------------- - uint32_t CountLeadingEmptyOrDeleted() const { - auto special = _mm_set1_epi8(static_cast(kSentinel)); - return TrailingZeros( - static_cast(_mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1)); - } -#ifdef __INTEL_COMPILER -#pragma warning pop -#endif - - // ---------------------------------------------------------------------- - void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { - auto msbs = _mm_set1_epi8(static_cast(-128)); - auto x126 = _mm_set1_epi8(126); -#if PHMAP_HAVE_SSSE3 - auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs); -#else - auto zero = _mm_setzero_si128(); - auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl); - auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126)); -#endif - _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res); - } - - __m128i ctrl; -}; - -#ifdef _MSC_VER - #pragma warning(pop) -#endif - -#endif // PHMAP_HAVE_SSE2 - -// -------------------------------------------------------------------------- -// -------------------------------------------------------------------------- -struct GroupPortableImpl -{ - enum { kWidth = 8 }; - - explicit GroupPortableImpl(const ctrl_t* pos) - : ctrl(little_endian::Load64(pos)) {} - - BitMask Match(h2_t hash) const { - // For the technique, see: - // http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord - // (Determine if a word has a byte equal to n). 
- // - // Caveat: there are false positives but: - // - they only occur if there is a real match - // - they never occur on kEmpty, kDeleted, kSentinel - // - they will be handled gracefully by subsequent checks in code - // - // Example: - // v = 0x1716151413121110 - // hash = 0x12 - // retval = (v - lsbs) & ~v & msbs = 0x0000000080800000 - constexpr uint64_t msbs = 0x8080808080808080ULL; - constexpr uint64_t lsbs = 0x0101010101010101ULL; - auto x = ctrl ^ (lsbs * hash); - return BitMask((x - lsbs) & ~x & msbs); - } - - BitMask MatchEmpty() const { // bit 1 of each byte is 0 for empty (but not for deleted) - constexpr uint64_t msbs = 0x8080808080808080ULL; - return BitMask((ctrl & (~ctrl << 6)) & msbs); - } - - BitMask MatchEmptyOrDeleted() const { // lsb of each byte is 0 for empty or deleted - constexpr uint64_t msbs = 0x8080808080808080ULL; - return BitMask((ctrl & (~ctrl << 7)) & msbs); - } - - uint32_t CountLeadingEmptyOrDeleted() const { - constexpr uint64_t gaps = 0x00FEFEFEFEFEFEFEULL; - return (uint32_t)((TrailingZeros(((~ctrl & (ctrl >> 7)) | gaps) + 1) + 7) >> 3); - } - - void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { - constexpr uint64_t msbs = 0x8080808080808080ULL; - constexpr uint64_t lsbs = 0x0101010101010101ULL; - auto x = ctrl & msbs; - auto res = (~x + (x >> 7)) & ~lsbs; - little_endian::Store64(dst, res); - } - - uint64_t ctrl; -}; - -#if PHMAP_HAVE_SSE2 - using Group = GroupSse2Impl; -#else - using Group = GroupPortableImpl; -#endif - -// The number of cloned control bytes that we copy from the beginning to the -// end of the control bytes array. -// ------------------------------------------------------------------------- -constexpr size_t NumClonedBytes() { return Group::kWidth - 1; } - -template -class raw_hash_set; - -inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; } - -// -------------------------------------------------------------------------- -// PRECONDITION: -// IsValidCapacity(capacity) -// ctrl[capacity] == kSentinel -// ctrl[i] != kSentinel for all i < capacity -// Applies mapping for every byte in ctrl: -// DELETED -> EMPTY -// EMPTY -> EMPTY -// FULL -> DELETED -// -------------------------------------------------------------------------- -inline void ConvertDeletedToEmptyAndFullToDeleted( - ctrl_t* ctrl, size_t capacity) -{ - assert(ctrl[capacity] == kSentinel); - assert(IsValidCapacity(capacity)); - for (ctrl_t* pos = ctrl; pos != ctrl + capacity + 1; pos += Group::kWidth) { - Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos); - } - // Copy the cloned ctrl bytes. - std::memcpy(ctrl + capacity + 1, ctrl, Group::kWidth); - ctrl[capacity] = kSentinel; -} - -// -------------------------------------------------------------------------- -// Rounds up the capacity to the next power of 2 minus 1, with a minimum of 1. -// -------------------------------------------------------------------------- -inline size_t NormalizeCapacity(size_t n) -{ - return n ? ~size_t{} >> LeadingZeros(n) : 1; -} - -// -------------------------------------------------------------------------- -// We use 7/8th as maximum load factor. -// For 16-wide groups, that gives an average of two empty slots per group. -// -------------------------------------------------------------------------- -inline size_t CapacityToGrowth(size_t capacity) -{ - assert(IsValidCapacity(capacity)); - // `capacity*7/8` - PHMAP_IF_CONSTEXPR (Group::kWidth == 8) { - if (capacity == 7) - { - // x-x/8 does not work when x==7. 
- return 6; - } - } - return capacity - capacity / 8; -} - -// -------------------------------------------------------------------------- -// From desired "growth" to a lowerbound of the necessary capacity. -// Might not be a valid one and required NormalizeCapacity(). -// -------------------------------------------------------------------------- -inline size_t GrowthToLowerboundCapacity(size_t growth) -{ - // `growth*8/7` - PHMAP_IF_CONSTEXPR (Group::kWidth == 8) { - if (growth == 7) - { - // x+(x-1)/7 does not work when x==7. - return 8; - } - } - return growth + static_cast((static_cast(growth) - 1) / 7); -} - -namespace hashtable_debug_internal { - -// If it is a map, call get<0>(). -using std::get; -template -auto GetKey(const typename T::value_type& pair, int) -> decltype(get<0>(pair)) { - return get<0>(pair); -} - -// If it is not a map, return the value directly. -template -const typename T::key_type& GetKey(const typename T::key_type& key, char) { - return key; -} - -// -------------------------------------------------------------------------- -// Containers should specialize this to provide debug information for that -// container. -// -------------------------------------------------------------------------- -template -struct HashtableDebugAccess -{ - // Returns the number of probes required to find `key` in `c`. The "number of - // probes" is a concept that can vary by container. Implementations should - // return 0 when `key` was found in the minimum number of operations and - // should increment the result for each non-trivial operation required to find - // `key`. - // - // The default implementation uses the bucket api from the standard and thus - // works for `std::unordered_*` containers. - // -------------------------------------------------------------------------- - static size_t GetNumProbes(const Container& c, - const typename Container::key_type& key) { - if (!c.bucket_count()) return {}; - size_t num_probes = 0; - size_t bucket = c.bucket(key); - for (auto it = c.begin(bucket), e = c.end(bucket);; ++it, ++num_probes) { - if (it == e) return num_probes; - if (c.key_eq()(key, GetKey(*it, 0))) return num_probes; - } - } -}; - -} // namespace hashtable_debug_internal - -// ---------------------------------------------------------------------------- -// I N F O Z S T U B S -// ---------------------------------------------------------------------------- -struct HashtablezInfo -{ - void PrepareForSampling() {} -}; - -inline void RecordRehashSlow(HashtablezInfo*, size_t ) {} - -static inline void RecordInsertSlow(HashtablezInfo* , size_t, size_t ) {} - -static inline void RecordEraseSlow(HashtablezInfo*) {} - -static inline HashtablezInfo* SampleSlow(int64_t*) { return nullptr; } -static inline void UnsampleSlow(HashtablezInfo* ) {} - -class HashtablezInfoHandle -{ -public: - inline void RecordStorageChanged(size_t , size_t ) {} - inline void RecordRehash(size_t ) {} - inline void RecordInsert(size_t , size_t ) {} - inline void RecordErase() {} - friend inline void swap(HashtablezInfoHandle& , - HashtablezInfoHandle& ) noexcept {} -}; - -static inline HashtablezInfoHandle Sample() { return HashtablezInfoHandle(); } - -class HashtablezSampler -{ -public: - // Returns a global Sampler. 
- static HashtablezSampler& Global() { static HashtablezSampler hzs; return hzs; } - HashtablezInfo* Register() { static HashtablezInfo info; return &info; } - void Unregister(HashtablezInfo* ) {} - - using DisposeCallback = void (*)(const HashtablezInfo&); - DisposeCallback SetDisposeCallback(DisposeCallback ) { return nullptr; } - int64_t Iterate(const std::function& ) { return 0; } -}; - -static inline void SetHashtablezEnabled(bool ) {} -static inline void SetHashtablezSampleParameter(int32_t ) {} -static inline void SetHashtablezMaxSamples(int32_t ) {} - - -namespace memory_internal { - -// Constructs T into uninitialized storage pointed by `ptr` using the args -// specified in the tuple. -// ---------------------------------------------------------------------------- -template -void ConstructFromTupleImpl(Alloc* alloc, T* ptr, Tuple&& t, - phmap::index_sequence) { - phmap::allocator_traits::construct( - *alloc, ptr, std::get(std::forward(t))...); -} - -template -struct WithConstructedImplF { - template - decltype(std::declval()(std::declval())) operator()( - Args&&... args) const { - return std::forward(f)(T(std::forward(args)...)); - } - F&& f; -}; - -template -decltype(std::declval()(std::declval())) WithConstructedImpl( - Tuple&& t, phmap::index_sequence, F&& f) { - return WithConstructedImplF{std::forward(f)}( - std::get(std::forward(t))...); -} - -template -auto TupleRefImpl(T&& t, phmap::index_sequence) - -> decltype(std::forward_as_tuple(std::get(std::forward(t))...)) { - return std::forward_as_tuple(std::get(std::forward(t))...); -} - -// Returns a tuple of references to the elements of the input tuple. T must be a -// tuple. -// ---------------------------------------------------------------------------- -template -auto TupleRef(T&& t) -> decltype( - TupleRefImpl(std::forward(t), - phmap::make_index_sequence< - std::tuple_size::type>::value>())) { - return TupleRefImpl( - std::forward(t), - phmap::make_index_sequence< - std::tuple_size::type>::value>()); -} - -template -decltype(std::declval()(std::declval(), std::piecewise_construct, - std::declval>(), std::declval())) -DecomposePairImpl(F&& f, std::pair, V> p) { - const auto& key = std::get<0>(p.first); - return std::forward(f)(key, std::piecewise_construct, std::move(p.first), - std::move(p.second)); -} - -} // namespace memory_internal - - -// ---------------------------------------------------------------------------- -// R A W _ H A S H _ S E T -// ---------------------------------------------------------------------------- -// An open-addressing -// hashtable with quadratic probing. -// -// This is a low level hashtable on top of which different interfaces can be -// implemented, like flat_hash_set, node_hash_set, string_hash_set, etc. -// -// The table interface is similar to that of std::unordered_set. Notable -// differences are that most member functions support heterogeneous keys when -// BOTH the hash and eq functions are marked as transparent. They do so by -// providing a typedef called `is_transparent`. -// -// When heterogeneous lookup is enabled, functions that take key_type act as if -// they have an overload set like: -// -// iterator find(const key_type& key); -// template -// iterator find(const K& key); -// -// size_type erase(const key_type& key); -// template -// size_type erase(const K& key); -// -// std::pair equal_range(const key_type& key); -// template -// std::pair equal_range(const K& key); -// -// When heterogeneous lookup is disabled, only the explicit `key_type` overloads -// exist. 
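To make the heterogeneous overload set above concrete, here is a small usage sketch. The functor names `StringHash`/`StringEq` are illustrative; any pair of functors that both expose `is_transparent` enables the template overloads:

```
#include <cstdio>
#include <functional>
#include <string>
#include <string_view>
#include <parallel_hashmap/phmap.h>

struct StringHash {
    using is_transparent = void;  // opts in to heterogeneous lookup
    size_t operator()(std::string_view s) const {
        return std::hash<std::string_view>{}(s);
    }
};
struct StringEq {
    using is_transparent = void;
    bool operator()(std::string_view a, std::string_view b) const {
        return a == b;
    }
};

int main() {
    phmap::flat_hash_set<std::string, StringHash, StringEq> s = {"abc", "def"};
    // Because BOTH functors are transparent, find() deduces K =
    // std::string_view and no temporary std::string is materialized.
    std::printf("%d\n", s.find(std::string_view("abc")) != s.end());  // 1
}
```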
-// -// find() also supports passing the hash explicitly: -// -// iterator find(const key_type& key, size_t hash); -// template -// iterator find(const U& key, size_t hash); -// -// In addition the pointer to element and iterator stability guarantees are -// weaker: all iterators and pointers are invalidated after a new element is -// inserted. -// -// IMPLEMENTATION DETAILS -// -// The table stores elements inline in a slot array. In addition to the slot -// array the table maintains some control state per slot. The extra state is one -// byte per slot and stores empty or deleted marks, or alternatively 7 bits from -// the hash of an occupied slot. The table is split into logical groups of -// slots, like so: -// -// Group 1 Group 2 Group 3 -// +---------------+---------------+---------------+ -// | | | | | | | | | | | | | | | | | | | | | | | | | -// +---------------+---------------+---------------+ -// -// On lookup the hash is split into two parts: -// - H2: 7 bits (those stored in the control bytes) -// - H1: the rest of the bits -// The groups are probed using H1. For each group the slots are matched to H2 in -// parallel. Because H2 is 7 bits (128 states) and the number of slots per group -// is low (8 or 16) in almost all cases a match in H2 is also a lookup hit. -// -// On insert, once the right group is found (as in lookup), its slots are -// filled in order. -// -// On erase a slot is cleared. In case the group did not have any empty slots -// before the erase, the erased slot is marked as deleted. -// -// Groups without empty slots (but maybe with deleted slots) extend the probe -// sequence. The probing algorithm is quadratic. Given N the number of groups, -// the probing function for the i'th probe is: -// -// P(0) = H1 % N -// -// P(i) = (P(i - 1) + i) % N -// -// This probing function guarantees that after N probes, all the groups of the -// table will be probed exactly once. -// ---------------------------------------------------------------------------- -template -class raw_hash_set -{ - using PolicyTraits = hash_policy_traits; - using KeyArgImpl = - KeyArg::value && IsTransparent::value>; - -public: - using init_type = typename PolicyTraits::init_type; - using key_type = typename PolicyTraits::key_type; - // TODO(sbenza): Hide slot_type as it is an implementation detail. Needs user - // code fixes! - using slot_type = typename PolicyTraits::slot_type; - using allocator_type = Alloc; - using size_type = size_t; - using difference_type = ptrdiff_t; - using hasher = Hash; - using key_equal = Eq; - using policy_type = Policy; - using value_type = typename PolicyTraits::value_type; - using reference = value_type&; - using const_reference = const value_type&; - using pointer = typename phmap::allocator_traits< - allocator_type>::template rebind_traits::pointer; - using const_pointer = typename phmap::allocator_traits< - allocator_type>::template rebind_traits::const_pointer; - - // Alias used for heterogeneous lookup functions. - // `key_arg` evaluates to `K` when the functors are transparent and to - // `key_type` otherwise. It permits template argument deduction on `K` for the - // transparent case. - template - using key_arg = typename KeyArgImpl::template type; - -private: - // Give an early error when key_type is not hashable/eq. 
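The H1/H2 split described in the implementation notes above is easy to state directly. A minimal sketch (the real code also mixes a per-table seed derived from the control pointer into H1, which is omitted here):

```
#include <cstdint>
#include <cstdio>

// 7 low bits are stored in the control byte (H2); the remaining bits
// drive the probe sequence (H1).
size_t H1(size_t hashval) { return hashval >> 7; }
uint8_t H2(size_t hashval) { return hashval & 0x7F; }

int main() {
    size_t hashval = 0x12345678;
    std::printf("H1=%zx H2=%02x\n", H1(hashval), H2(hashval));  // H1=2468ac H2=78
}
```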
- auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k)); - auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k)); - - using Layout = phmap::priv::Layout; - - static Layout MakeLayout(size_t capacity) { - assert(IsValidCapacity(capacity)); - return Layout(capacity + Group::kWidth + 1, capacity); - } - - using AllocTraits = phmap::allocator_traits; - using SlotAlloc = typename phmap::allocator_traits< - allocator_type>::template rebind_alloc; - using SlotAllocTraits = typename phmap::allocator_traits< - allocator_type>::template rebind_traits; - - static_assert(std::is_lvalue_reference::value, - "Policy::element() must return a reference"); - - template - struct SameAsElementReference - : std::is_same::type>::type, - typename std::remove_cv< - typename std::remove_reference::type>::type> {}; - - // An enabler for insert(T&&): T must be convertible to init_type or be the - // same as [cv] value_type [ref]. - // Note: we separate SameAsElementReference into its own type to avoid using - // reference unless we need to. MSVC doesn't seem to like it in some - // cases. - template - using RequiresInsertable = typename std::enable_if< - phmap::disjunction, - SameAsElementReference>::value, - int>::type; - - // RequiresNotInit is a workaround for gcc prior to 7.1. - // See https://godbolt.org/g/Y4xsUh. - template - using RequiresNotInit = - typename std::enable_if::value, int>::type; - - template - using IsDecomposable = IsDecomposable; - -public: - static_assert(std::is_same::value, - "Allocators with custom pointer types are not supported"); - static_assert(std::is_same::value, - "Allocators with custom pointer types are not supported"); - - class iterator - { - friend class raw_hash_set; - - public: - using iterator_category = std::forward_iterator_tag; - using value_type = typename raw_hash_set::value_type; - using reference = - phmap::conditional_t; - using pointer = phmap::remove_reference_t*; - using difference_type = typename raw_hash_set::difference_type; - - iterator() {} - - // PRECONDITION: not an end() iterator. - reference operator*() const { return PolicyTraits::element(slot_); } - - // PRECONDITION: not an end() iterator. - pointer operator->() const { return &operator*(); } - - // PRECONDITION: not an end() iterator. - iterator& operator++() { - ++ctrl_; - ++slot_; - skip_empty_or_deleted(); - return *this; - } - // PRECONDITION: not an end() iterator. - iterator operator++(int) { - auto tmp = *this; - ++*this; - return tmp; - } - -#if PHMAP_BIDIRECTIONAL - // PRECONDITION: not a begin() iterator. - iterator& operator--() { - assert(ctrl_); - do { - --ctrl_; - --slot_; - } while (IsEmptyOrDeleted(*ctrl_)); - return *this; - } - - // PRECONDITION: not a begin() iterator. - iterator operator--(int) { - auto tmp = *this; - --*this; - return tmp; - } -#endif - - friend bool operator==(const iterator& a, const iterator& b) { - return a.ctrl_ == b.ctrl_; - } - friend bool operator!=(const iterator& a, const iterator& b) { - return !(a == b); - } - - private: - iterator(ctrl_t* ctrl) : ctrl_(ctrl) {} // for end() - iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) {} - - void skip_empty_or_deleted() { - while (IsEmptyOrDeleted(*ctrl_)) { - // ctrl is not necessarily aligned to Group::kWidth. It is also likely - // to read past the space for ctrl bytes and into slots. This is ok - // because ctrl has sizeof() == 1 and slot has sizeof() >= 1 so there - // is no way to read outside the combined slot array. 
- uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted(); - ctrl_ += shift; - slot_ += shift; - } - } - - ctrl_t* ctrl_ = nullptr; - // To avoid uninitialized member warnings, put slot_ in an anonymous union. - // The member is not initialized on singleton and end iterators. - union { - slot_type* slot_; - }; - }; - - class const_iterator - { - friend class raw_hash_set; - - public: - using iterator_category = typename iterator::iterator_category; - using value_type = typename raw_hash_set::value_type; - using reference = typename raw_hash_set::const_reference; - using pointer = typename raw_hash_set::const_pointer; - using difference_type = typename raw_hash_set::difference_type; - - const_iterator() {} - // Implicit construction from iterator. - const_iterator(iterator i) : inner_(std::move(i)) {} - - reference operator*() const { return *inner_; } - pointer operator->() const { return inner_.operator->(); } - - const_iterator& operator++() { - ++inner_; - return *this; - } - const_iterator operator++(int) { return inner_++; } - - friend bool operator==(const const_iterator& a, const const_iterator& b) { - return a.inner_ == b.inner_; - } - friend bool operator!=(const const_iterator& a, const const_iterator& b) { - return !(a == b); - } - - private: - const_iterator(const ctrl_t* ctrl, const slot_type* slot) - : inner_(const_cast(ctrl), const_cast(slot)) {} - - iterator inner_; - }; - - using node_type = node_handle, Alloc>; - using insert_return_type = InsertReturnType; - - raw_hash_set() noexcept( - std::is_nothrow_default_constructible::value&& - std::is_nothrow_default_constructible::value&& - std::is_nothrow_default_constructible::value) {} - - explicit raw_hash_set(size_t bucket_cnt, const hasher& hashfn = hasher(), - const key_equal& eq = key_equal(), - const allocator_type& alloc = allocator_type()) - : ctrl_(EmptyGroup()), settings_(0, hashfn, eq, alloc) { - if (bucket_cnt) { - size_t new_capacity = NormalizeCapacity(bucket_cnt); - reset_growth_left(new_capacity); - initialize_slots(new_capacity); - capacity_ = new_capacity; - } - } - - raw_hash_set(size_t bucket_cnt, const hasher& hashfn, - const allocator_type& alloc) - : raw_hash_set(bucket_cnt, hashfn, key_equal(), alloc) {} - - raw_hash_set(size_t bucket_cnt, const allocator_type& alloc) - : raw_hash_set(bucket_cnt, hasher(), key_equal(), alloc) {} - - explicit raw_hash_set(const allocator_type& alloc) - : raw_hash_set(0, hasher(), key_equal(), alloc) {} - - template - raw_hash_set(InputIter first, InputIter last, size_t bucket_cnt = 0, - const hasher& hashfn = hasher(), const key_equal& eq = key_equal(), - const allocator_type& alloc = allocator_type()) - : raw_hash_set(bucket_cnt, hashfn, eq, alloc) { - insert(first, last); - } - - template - raw_hash_set(InputIter first, InputIter last, size_t bucket_cnt, - const hasher& hashfn, const allocator_type& alloc) - : raw_hash_set(first, last, bucket_cnt, hashfn, key_equal(), alloc) {} - - template - raw_hash_set(InputIter first, InputIter last, size_t bucket_cnt, - const allocator_type& alloc) - : raw_hash_set(first, last, bucket_cnt, hasher(), key_equal(), alloc) {} - - template - raw_hash_set(InputIter first, InputIter last, const allocator_type& alloc) - : raw_hash_set(first, last, 0, hasher(), key_equal(), alloc) {} - - // Instead of accepting std::initializer_list as the first - // argument like std::unordered_set does, we have two overloads - // that accept std::initializer_list and std::initializer_list. - // This is advantageous for performance. 
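A runnable sketch of the `init_type` advantage for maps (the comment's own worked examples continue below). The key point: a braced pair materializes as a movable `std::pair<key_type, mapped_type>` instead of `value_type` with its `const` key:

```
#include <string>
#include <parallel_hashmap/phmap.h>

int main() {
    phmap::flat_hash_map<std::string, int> m;
    // Handled by insert(init_type&&): the braced pair becomes a
    // std::pair<std::string, int> rvalue whose key can be MOVED into
    // the slot, rather than a pair<const std::string, int> that must
    // be copied.
    m.insert({"abc", 42});
    return (int)m.size() - 1;  // exits 0
}
```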
- // - // // Turns {"abc", "def"} into std::initializer_list, then - // // copies the strings into the set. - // std::unordered_set s = {"abc", "def"}; - // - // // Turns {"abc", "def"} into std::initializer_list, then - // // copies the strings into the set. - // phmap::flat_hash_set s = {"abc", "def"}; - // - // The same trick is used in insert(). - // - // The enabler is necessary to prevent this constructor from triggering where - // the copy constructor is meant to be called. - // - // phmap::flat_hash_set a, b{a}; - // - // RequiresNotInit is a workaround for gcc prior to 7.1. - template = 0, RequiresInsertable = 0> - raw_hash_set(std::initializer_list init, size_t bucket_cnt = 0, - const hasher& hashfn = hasher(), const key_equal& eq = key_equal(), - const allocator_type& alloc = allocator_type()) - : raw_hash_set(init.begin(), init.end(), bucket_cnt, hashfn, eq, alloc) {} - - raw_hash_set(std::initializer_list init, size_t bucket_cnt = 0, - const hasher& hashfn = hasher(), const key_equal& eq = key_equal(), - const allocator_type& alloc = allocator_type()) - : raw_hash_set(init.begin(), init.end(), bucket_cnt, hashfn, eq, alloc) {} - - template = 0, RequiresInsertable = 0> - raw_hash_set(std::initializer_list init, size_t bucket_cnt, - const hasher& hashfn, const allocator_type& alloc) - : raw_hash_set(init, bucket_cnt, hashfn, key_equal(), alloc) {} - - raw_hash_set(std::initializer_list init, size_t bucket_cnt, - const hasher& hashfn, const allocator_type& alloc) - : raw_hash_set(init, bucket_cnt, hashfn, key_equal(), alloc) {} - - template = 0, RequiresInsertable = 0> - raw_hash_set(std::initializer_list init, size_t bucket_cnt, - const allocator_type& alloc) - : raw_hash_set(init, bucket_cnt, hasher(), key_equal(), alloc) {} - - raw_hash_set(std::initializer_list init, size_t bucket_cnt, - const allocator_type& alloc) - : raw_hash_set(init, bucket_cnt, hasher(), key_equal(), alloc) {} - - template = 0, RequiresInsertable = 0> - raw_hash_set(std::initializer_list init, const allocator_type& alloc) - : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {} - - raw_hash_set(std::initializer_list init, - const allocator_type& alloc) - : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {} - - raw_hash_set(const raw_hash_set& that) - : raw_hash_set(that, AllocTraits::select_on_container_copy_construction( - that.alloc_ref())) {} - - raw_hash_set(const raw_hash_set& that, const allocator_type& a) - : raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) { - rehash(that.capacity()); // operator=() should preserve load_factor - // Because the table is guaranteed to be empty, we can do something faster - // than a full `insert`. 
- for (const auto& v : that) { - const size_t hashval = PolicyTraits::apply(HashElement{hash_ref()}, v); - auto target = find_first_non_full(hashval); - set_ctrl(target.offset, H2(hashval)); - emplace_at(target.offset, v); - infoz_.RecordInsert(hashval, target.probe_length); - } - size_ = that.size(); - growth_left() -= that.size(); - } - - raw_hash_set(raw_hash_set&& that) noexcept( - std::is_nothrow_copy_constructible::value&& - std::is_nothrow_copy_constructible::value&& - std::is_nothrow_copy_constructible::value) - : ctrl_(phmap::exchange(that.ctrl_, EmptyGroup())), - slots_(phmap::exchange(that.slots_, nullptr)), - size_(phmap::exchange(that.size_, 0)), - capacity_(phmap::exchange(that.capacity_, 0)), - infoz_(phmap::exchange(that.infoz_, HashtablezInfoHandle())), - // Hash, equality and allocator are copied instead of moved because - // `that` must be left valid. If Hash is std::function, moving it - // would create a nullptr functor that cannot be called. - settings_(that.settings_) { - // growth_left was copied above, reset the one from `that`. - that.growth_left() = 0; - } - - raw_hash_set(raw_hash_set&& that, const allocator_type& a) - : ctrl_(EmptyGroup()), - slots_(nullptr), - size_(0), - capacity_(0), - settings_(0, that.hash_ref(), that.eq_ref(), a) { - if (a == that.alloc_ref()) { - std::swap(ctrl_, that.ctrl_); - std::swap(slots_, that.slots_); - std::swap(size_, that.size_); - std::swap(capacity_, that.capacity_); - std::swap(growth_left(), that.growth_left()); - std::swap(infoz_, that.infoz_); - } else { - reserve(that.size()); - // Note: this will copy elements of dense_set and unordered_set instead of - // moving them. This can be fixed if it ever becomes an issue. - for (auto& elem : that) insert(std::move(elem)); - } - } - - raw_hash_set& operator=(const raw_hash_set& that) { - raw_hash_set tmp(that, - AllocTraits::propagate_on_container_copy_assignment::value - ? that.alloc_ref() - : alloc_ref()); - swap(tmp); - return *this; - } - - raw_hash_set& operator=(raw_hash_set&& that) noexcept( - phmap::allocator_traits::is_always_equal::value&& - std::is_nothrow_move_assignable::value&& - std::is_nothrow_move_assignable::value) { - // TODO(sbenza): We should only use the operations from the noexcept clause - // to make sure we actually adhere to that contract. - return move_assign( - std::move(that), - typename AllocTraits::propagate_on_container_move_assignment()); - } - - ~raw_hash_set() { destroy_slots(); } - - iterator begin() { - auto it = iterator_at(0); - it.skip_empty_or_deleted(); - return it; - } - iterator end() - { -#if PHMAP_BIDIRECTIONAL - return iterator_at(capacity_); -#else - return {ctrl_ + capacity_}; -#endif - } - - const_iterator begin() const { - return const_cast(this)->begin(); - } - const_iterator end() const { return const_cast(this)->end(); } - const_iterator cbegin() const { return begin(); } - const_iterator cend() const { return end(); } - - bool empty() const { return !size(); } - size_t size() const { return size_; } - size_t capacity() const { return capacity_; } - size_t max_size() const { return (std::numeric_limits::max)(); } - - PHMAP_ATTRIBUTE_REINITIALIZES void clear() { - // Iterating over this container is O(bucket_count()). When bucket_count() - // is much greater than size(), iteration becomes prohibitively expensive. - // For clear() it is more important to reuse the allocated array when the - // container is small because allocation takes comparatively long time - // compared to destruction of the elements of the container. 
So we pick the - // largest bucket_count() threshold for which iteration is still fast and - // past that we simply deallocate the array. - if (empty()) - return; - if (capacity_ > 127) { - destroy_slots(); - } else if (capacity_) { - for (size_t i = 0; i != capacity_; ++i) { - if (IsFull(ctrl_[i])) { - PolicyTraits::destroy(&alloc_ref(), slots_ + i); - } - } - size_ = 0; - reset_ctrl(capacity_); - reset_growth_left(capacity_); - } - assert(empty()); - infoz_.RecordStorageChanged(0, capacity_); - } - - // This overload kicks in when the argument is an rvalue of insertable and - // decomposable type other than init_type. - // - // flat_hash_map m; - // m.insert(std::make_pair("abc", 42)); - template = 0, - typename std::enable_if::value, int>::type = 0, - T* = nullptr> - std::pair insert(T&& value) { - return emplace(std::forward(value)); - } - - // This overload kicks in when the argument is a bitfield or an lvalue of - // insertable and decomposable type. - // - // union { int n : 1; }; - // flat_hash_set s; - // s.insert(n); - // - // flat_hash_set s; - // const char* p = "hello"; - // s.insert(p); - // - // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace - // RequiresInsertable with RequiresInsertable. - // We are hitting this bug: https://godbolt.org/g/1Vht4f. - template = 0, - typename std::enable_if::value, int>::type = 0> - std::pair insert(const T& value) { - return emplace(value); - } - - // This overload kicks in when the argument is an rvalue of init_type. Its - // purpose is to handle brace-init-list arguments. - // - // flat_hash_set s; - // s.insert({"abc", 42}); - std::pair insert(init_type&& value) { - return emplace(std::move(value)); - } - - template = 0, - typename std::enable_if::value, int>::type = 0, - T* = nullptr> - iterator insert(const_iterator, T&& value) { - return insert(std::forward(value)).first; - } - - // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace - // RequiresInsertable with RequiresInsertable. - // We are hitting this bug: https://godbolt.org/g/1Vht4f. 
- template = 0, - typename std::enable_if::value, int>::type = 0> - iterator insert(const_iterator, const T& value) { - return insert(value).first; - } - - iterator insert(const_iterator, init_type&& value) { - return insert(std::move(value)).first; - } - - template - using IsRandomAccess = std::is_same::iterator_category, - std::random_access_iterator_tag>; - - - template - struct has_difference_operator - { - private: - using yes = std::true_type; - using no = std::false_type; - - template static auto test(int) -> decltype(std::declval() - std::declval() == 1, yes()); - template static no test(...); - - public: - static constexpr bool value = std::is_same(0)), yes>::value; - }; - - template ::value, int> = 0> - void insert(InputIt first, InputIt last) { - this->reserve(this->size() + (last - first)); - for (; first != last; ++first) - emplace(*first); - } - - template ::value, int> = 0> - void insert(InputIt first, InputIt last) { - for (; first != last; ++first) - emplace(*first); - } - - template = 0, RequiresInsertable = 0> - void insert(std::initializer_list ilist) { - insert(ilist.begin(), ilist.end()); - } - - void insert(std::initializer_list ilist) { - insert(ilist.begin(), ilist.end()); - } - - insert_return_type insert(node_type&& node) { - if (!node) return {end(), false, node_type()}; - const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node)); - auto res = PolicyTraits::apply( - InsertSlot{*this, std::move(*CommonAccess::GetSlot(node))}, - elem); - if (res.second) { - CommonAccess::Reset(&node); - return {res.first, true, node_type()}; - } else { - return {res.first, false, std::move(node)}; - } - } - - insert_return_type insert(node_type&& node, size_t hashval) { - if (!node) return {end(), false, node_type()}; - const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node)); - auto res = PolicyTraits::apply( - InsertSlotWithHash{*this, std::move(*CommonAccess::GetSlot(node)), hashval}, - elem); - if (res.second) { - CommonAccess::Reset(&node); - return {res.first, true, node_type()}; - } else { - return {res.first, false, std::move(node)}; - } - } - - iterator insert(const_iterator, node_type&& node) { - auto res = insert(std::move(node)); - node = std::move(res.node); - return res.position; - } - - // This overload kicks in if we can deduce the key from args. This enables us - // to avoid constructing value_type if an entry with the same key already - // exists. - // - // For example: - // - // flat_hash_map m = {{"abc", "def"}}; - // // Creates no std::string copies and makes no heap allocations. - // m.emplace("abc", "xyz"); - template ::value, int>::type = 0> - std::pair emplace(Args&&... args) { - return PolicyTraits::apply(EmplaceDecomposable{*this}, - std::forward(args)...); - } - - template ::value, int>::type = 0> - std::pair emplace_with_hash(size_t hashval, Args&&... args) { - return PolicyTraits::apply(EmplaceDecomposableHashval{*this, hashval}, std::forward(args)...); - } - - // This overload kicks in if we cannot deduce the key from args. It constructs - // value_type unconditionally and then either moves it into the table or - // destroys. - template ::value, int>::type = 0> - std::pair emplace(Args&&... 
args) { - typename std::aligned_storage::type - raw; - slot_type* slot = reinterpret_cast(&raw); - - PolicyTraits::construct(&alloc_ref(), slot, std::forward(args)...); - const auto& elem = PolicyTraits::element(slot); - return PolicyTraits::apply(InsertSlot{*this, std::move(*slot)}, elem); - } - - template ::value, int>::type = 0> - std::pair emplace_with_hash(size_t hashval, Args&&... args) { - typename std::aligned_storage::type raw; - slot_type* slot = reinterpret_cast(&raw); - - PolicyTraits::construct(&alloc_ref(), slot, std::forward(args)...); - const auto& elem = PolicyTraits::element(slot); - return PolicyTraits::apply(InsertSlotWithHash{*this, std::move(*slot), hashval}, elem); - } - - template - iterator emplace_hint(const_iterator, Args&&... args) { - return emplace(std::forward(args)...).first; - } - - template - iterator emplace_hint_with_hash(size_t hashval, const_iterator, Args&&... args) { - return emplace_with_hash(hashval, std::forward(args)...).first; - } - - // Extension API: support for lazy emplace. - // - // Looks up key in the table. If found, returns the iterator to the element. - // Otherwise calls f with one argument of type raw_hash_set::constructor. f - // MUST call raw_hash_set::constructor with arguments as if a - // raw_hash_set::value_type is constructed, otherwise the behavior is - // undefined. - // - // For example: - // - // std::unordered_set s; - // // Makes ArenaStr even if "abc" is in the map. - // s.insert(ArenaString(&arena, "abc")); - // - // flat_hash_set s; - // // Makes ArenaStr only if "abc" is not in the map. - // s.lazy_emplace("abc", [&](const constructor& ctor) { - // ctor(&arena, "abc"); - // }); - // - // WARNING: This API is currently experimental. If there is a way to implement - // the same thing with the rest of the API, prefer that. - class constructor - { - friend class raw_hash_set; - - public: - template - void operator()(Args&&... args) const { - assert(*slot_); - PolicyTraits::construct(alloc_, *slot_, std::forward(args)...); - *slot_ = nullptr; - } - - private: - constructor(allocator_type* a, slot_type** slot) : alloc_(a), slot_(slot) {} - - allocator_type* alloc_; - slot_type** slot_; - }; - - template - iterator lazy_emplace(const key_arg& key, F&& f) { - auto res = find_or_prepare_insert(key); - if (res.second) { - lazy_emplace_at(res.first, std::forward(f)); - } - return iterator_at(res.first); - } - - template - iterator lazy_emplace_with_hash(const key_arg& key, size_t hashval, F&& f) { - auto res = find_or_prepare_insert(key, hashval); - if (res.second) { - lazy_emplace_at(res.first, std::forward(f)); - } - return iterator_at(res.first); - } - - template - void lazy_emplace_at(size_t& idx, F&& f) { - slot_type* slot = slots_ + idx; - std::forward(f)(constructor(&alloc_ref(), &slot)); - assert(!slot); - } - - template - void emplace_single_with_hash(const key_arg& key, size_t hashval, F&& f) { - auto res = find_or_prepare_insert(key, hashval); - if (res.second) - lazy_emplace_at(res.first, std::forward(f)); - else - _erase(iterator_at(res.first)); - } - - - // Extension API: support for heterogeneous keys. - // - // std::unordered_set s; - // // Turns "abc" into std::string. - // s.erase("abc"); - // - // flat_hash_set s; - // // Uses "abc" directly without copying it into std::string. 
- // s.erase("abc"); - template - size_type erase(const key_arg& key) { - auto it = find(key); - if (it == end()) return 0; - _erase(it); - return 1; - } - - - iterator erase(const_iterator cit) { return erase(cit.inner_); } - - // Erases the element pointed to by `it`. Unlike `std::unordered_set::erase`, - // this method returns void to reduce algorithmic complexity to O(1). In - // order to erase while iterating across a map, use the following idiom (which - // also works for standard containers): - // - // for (auto it = m.begin(), end = m.end(); it != end;) { - // if () { - // m._erase(it++); - // } else { - // ++it; - // } - // } - void _erase(iterator it) { - assert(it != end()); - PolicyTraits::destroy(&alloc_ref(), it.slot_); - erase_meta_only(it); - } - void _erase(const_iterator cit) { _erase(cit.inner_); } - - // This overload is necessary because otherwise erase(const K&) would be - // a better match if non-const iterator is passed as an argument. - iterator erase(iterator it) { - auto res = it; - ++res; - _erase(it); - return res; - } - - iterator erase(const_iterator first, const_iterator last) { - while (first != last) { - _erase(first++); - } - return last.inner_; - } - - // Moves elements from `src` into `this`. - // If the element already exists in `this`, it is left unmodified in `src`. - template - void merge(raw_hash_set& src) { // NOLINT - assert(this != &src); - for (auto it = src.begin(), e = src.end(); it != e; ++it) { - if (PolicyTraits::apply(InsertSlot{*this, std::move(*it.slot_)}, - PolicyTraits::element(it.slot_)) - .second) { - src.erase_meta_only(it); - } - } - } - - template - void merge(raw_hash_set&& src) { - merge(src); - } - - node_type extract(const_iterator position) { - auto node = - CommonAccess::Make(alloc_ref(), position.inner_.slot_); - erase_meta_only(position); - return node; - } - - template < - class K = key_type, - typename std::enable_if::value, int>::type = 0> - node_type extract(const key_arg& key) { - auto it = find(key); - return it == end() ? node_type() : extract(const_iterator{it}); - } - - void swap(raw_hash_set& that) noexcept( - IsNoThrowSwappable() && IsNoThrowSwappable() && - (!AllocTraits::propagate_on_container_swap::value || - IsNoThrowSwappable())) { - using std::swap; - swap(ctrl_, that.ctrl_); - swap(slots_, that.slots_); - swap(size_, that.size_); - swap(capacity_, that.capacity_); - swap(growth_left(), that.growth_left()); - swap(hash_ref(), that.hash_ref()); - swap(eq_ref(), that.eq_ref()); - swap(infoz_, that.infoz_); - if (AllocTraits::propagate_on_container_swap::value) { - swap(alloc_ref(), that.alloc_ref()); - } else { - // If the allocators do not compare equal it is officially undefined - // behavior. We choose to do nothing. - } - } - -#if !defined(PHMAP_NON_DETERMINISTIC) - template - bool phmap_dump(OutputArchive&) const; - - template - bool phmap_load(InputArchive&); -#endif - - void rehash(size_t n) { - if (n == 0 && capacity_ == 0) return; - if (n == 0 && size_ == 0) { - destroy_slots(); - infoz_.RecordStorageChanged(0, 0); - return; - } - // bitor is a faster way of doing `max` here. We will round up to the next - // power-of-2-minus-1, so bitor is good enough. - auto m = NormalizeCapacity((std::max)(n, size())); - // n == 0 unconditionally rehashes as per the standard. - if (n == 0 || m > capacity_) { - resize(m); - } - } - - void reserve(size_t n) { rehash(GrowthToLowerboundCapacity(n)); } - - // Extension API: support for heterogeneous keys. 
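A complete version of the erase-while-iterating idiom documented above, with the stripped `if ()` placeholder filled in by an arbitrary predicate (a sketch using `flat_hash_map` and the public `_erase` shown earlier):

```
#include <cstdio>
#include <parallel_hashmap/phmap.h>

int main() {
    phmap::flat_hash_map<int, int> m = {{1, 10}, {2, 20}, {3, 30}};
    // Erase every entry whose value exceeds 15. _erase(it++) advances
    // the iterator before the slot is destroyed, so iteration stays valid.
    for (auto it = m.begin(), end = m.end(); it != end;) {
        if (it->second > 15)
            m._erase(it++);  // void return: O(1), no successor computed
        else
            ++it;
    }
    std::printf("%zu\n", m.size());  // 1
}
```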
- // - // std::unordered_set s; - // // Turns "abc" into std::string. - // s.count("abc"); - // - // ch_set s; - // // Uses "abc" directly without copying it into std::string. - // s.count("abc"); - template - size_t count(const key_arg& key) const { - return find(key) == end() ? size_t(0) : size_t(1); - } - - // Issues CPU prefetch instructions for the memory needed to find or insert - // a key. Like all lookup functions, this support heterogeneous keys. - // - // NOTE: This is a very low level operation and should not be used without - // specific benchmarks indicating its importance. - void prefetch_hash(size_t hashval) const { - (void)hashval; -#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86)) - auto seq = probe(hashval); - _mm_prefetch((const char *)(ctrl_ + seq.offset()), _MM_HINT_NTA); - _mm_prefetch((const char *)(slots_ + seq.offset()), _MM_HINT_NTA); -#elif defined(__GNUC__) - auto seq = probe(hashval); - __builtin_prefetch(static_cast(ctrl_ + seq.offset())); - __builtin_prefetch(static_cast(slots_ + seq.offset())); -#endif // __GNUC__ - } - - template - void prefetch(const key_arg& key) const { - prefetch_hash(this->hash(key)); - } - - // The API of find() has two extensions. - // - // 1. The hash can be passed by the user. It must be equal to the hash of the - // key. - // - // 2. The type of the key argument doesn't have to be key_type. This is so - // called heterogeneous key support. - template - iterator find(const key_arg& key, size_t hashval) { - size_t offset; - if (find_impl(key, hashval, offset)) - return iterator_at(offset); - else - return end(); - } - - template - pointer find_ptr(const key_arg& key, size_t hashval) { - size_t offset; - if (find_impl(key, hashval, offset)) - return &PolicyTraits::element(slots_ + offset); - else - return nullptr; - } - - template - iterator find(const key_arg& key) { - return find(key, this->hash(key)); - } - - template - const_iterator find(const key_arg& key, size_t hashval) const { - return const_cast(this)->find(key, hashval); - } - template - const_iterator find(const key_arg& key) const { - return find(key, this->hash(key)); - } - - template - bool contains(const key_arg& key) const { - return find(key) != end(); - } - - template - bool contains(const key_arg& key, size_t hashval) const { - return find(key, hashval) != end(); - } - - template - std::pair equal_range(const key_arg& key) { - auto it = find(key); - if (it != end()) return {it, std::next(it)}; - return {it, it}; - } - template - std::pair equal_range( - const key_arg& key) const { - auto it = find(key); - if (it != end()) return {it, std::next(it)}; - return {it, it}; - } - - size_t bucket_count() const { return capacity_; } - float load_factor() const { - return capacity_ ? static_cast(size()) / capacity_ : 0.0; - } - float max_load_factor() const { return 1.0f; } - void max_load_factor(float) { - // Does nothing. 
- } - - hasher hash_function() const { return hash_ref(); } // warning: doesn't match internal hash - use hash() member function - key_equal key_eq() const { return eq_ref(); } - allocator_type get_allocator() const { return alloc_ref(); } - - friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) { - if (a.size() != b.size()) return false; - const raw_hash_set* outer = &a; - const raw_hash_set* inner = &b; - if (outer->capacity() > inner->capacity()) - std::swap(outer, inner); - for (const value_type& elem : *outer) - if (!inner->has_element(elem)) return false; - return true; - } - - friend bool operator!=(const raw_hash_set& a, const raw_hash_set& b) { - return !(a == b); - } - - friend void swap(raw_hash_set& a, - raw_hash_set& b) noexcept(noexcept(a.swap(b))) { - a.swap(b); - } - - template - size_t hash(const K& key) const { - return HashElement{hash_ref()}(key); - } - -private: - template - friend struct phmap::priv::hashtable_debug_internal::HashtableDebugAccess; - - template - bool find_impl(const key_arg& key, size_t hashval, size_t& offset) { - auto seq = probe(hashval); - while (true) { - Group g{ ctrl_ + seq.offset() }; - for (uint32_t i : g.Match((h2_t)H2(hashval))) { - offset = seq.offset((size_t)i); - if (PHMAP_PREDICT_TRUE(PolicyTraits::apply( - EqualElement{key, eq_ref()}, - PolicyTraits::element(slots_ + offset)))) - return true; - } - if (PHMAP_PREDICT_TRUE(g.MatchEmpty())) - return false; - seq.next(); - } - } - - struct FindElement - { - template - const_iterator operator()(const K& key, Args&&...) const { - return s.find(key); - } - const raw_hash_set& s; - }; - - struct HashElement - { - template - size_t operator()(const K& key, Args&&...) const { - return phmap_mix()(h(key)); - } - const hasher& h; - }; - - template - struct EqualElement - { - template - bool operator()(const K2& lhs, Args&&...) const { - return eq(lhs, rhs); - } - const K1& rhs; - const key_equal& eq; - }; - - template - std::pair emplace_decomposable(const K& key, size_t hashval, - Args&&... args) - { - auto res = find_or_prepare_insert(key, hashval); - if (res.second) { - emplace_at(res.first, std::forward(args)...); - } - return {iterator_at(res.first), res.second}; - } - - struct EmplaceDecomposable - { - template - std::pair operator()(const K& key, Args&&... args) const { - return s.emplace_decomposable(key, s.hash(key), std::forward(args)...); - } - raw_hash_set& s; - }; - - struct EmplaceDecomposableHashval { - template - std::pair operator()(const K& key, Args&&... args) const { - return s.emplace_decomposable(key, hashval, std::forward(args)...); - } - raw_hash_set& s; - size_t hashval; - }; - - template - struct InsertSlot - { - template - std::pair operator()(const K& key, Args&&...) && { - auto res = s.find_or_prepare_insert(key); - if (res.second) { - PolicyTraits::transfer(&s.alloc_ref(), s.slots_ + res.first, &slot); - } else if (do_destroy) { - PolicyTraits::destroy(&s.alloc_ref(), &slot); - } - return {s.iterator_at(res.first), res.second}; - } - raw_hash_set& s; - // Constructed slot. Either moved into place or destroyed. - slot_type&& slot; - }; - - template - struct InsertSlotWithHash - { - template - std::pair operator()(const K& key, Args&&...) 
&& { - auto res = s.find_or_prepare_insert(key, hashval); - if (res.second) { - PolicyTraits::transfer(&s.alloc_ref(), s.slots_ + res.first, &slot); - } else if (do_destroy) { - PolicyTraits::destroy(&s.alloc_ref(), &slot); - } - return {s.iterator_at(res.first), res.second}; - } - raw_hash_set& s; - // Constructed slot. Either moved into place or destroyed. - slot_type&& slot; - size_t &hashval; - }; - - // "erases" the object from the container, except that it doesn't actually - // destroy the object. It only updates all the metadata of the class. - // This can be used in conjunction with Policy::transfer to move the object to - // another place. - void erase_meta_only(const_iterator it) { - assert(IsFull(*it.inner_.ctrl_) && "erasing a dangling iterator"); - --size_; - const size_t index = (size_t)(it.inner_.ctrl_ - ctrl_); - const size_t index_before = (index - Group::kWidth) & capacity_; - const auto empty_after = Group(it.inner_.ctrl_).MatchEmpty(); - const auto empty_before = Group(ctrl_ + index_before).MatchEmpty(); - - // We count how many consecutive non empties we have to the right and to the - // left of `it`. If the sum is >= kWidth then there is at least one probe - // window that might have seen a full group. - bool was_never_full = - empty_before && empty_after && - static_cast(empty_after.TrailingZeros() + - empty_before.LeadingZeros()) < Group::kWidth; - - set_ctrl(index, was_never_full ? kEmpty : kDeleted); - growth_left() += was_never_full; - infoz_.RecordErase(); - } - - void initialize_slots(size_t new_capacity) { - assert(new_capacity); - if (std::is_same>::value && - slots_ == nullptr) { - infoz_ = Sample(); - } - - auto layout = MakeLayout(new_capacity); - char* mem = static_cast( - Allocate(&alloc_ref(), layout.AllocSize())); - ctrl_ = reinterpret_cast(layout.template Pointer<0>(mem)); - slots_ = layout.template Pointer<1>(mem); - reset_ctrl(new_capacity); - reset_growth_left(new_capacity); - infoz_.RecordStorageChanged(size_, new_capacity); - } - - void destroy_slots() { - if (!capacity_) return; - for (size_t i = 0; i != capacity_; ++i) { - if (IsFull(ctrl_[i])) { - PolicyTraits::destroy(&alloc_ref(), slots_ + i); - } - } - auto layout = MakeLayout(capacity_); - // Unpoison before returning the memory to the allocator. 
- SanitizerUnpoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_); - Deallocate(&alloc_ref(), ctrl_, layout.AllocSize()); - ctrl_ = EmptyGroup(); - slots_ = nullptr; - size_ = 0; - capacity_ = 0; - growth_left() = 0; - } - - void resize(size_t new_capacity) { - assert(IsValidCapacity(new_capacity)); - auto* old_ctrl = ctrl_; - auto* old_slots = slots_; - const size_t old_capacity = capacity_; - initialize_slots(new_capacity); - capacity_ = new_capacity; - - for (size_t i = 0; i != old_capacity; ++i) { - if (IsFull(old_ctrl[i])) { - size_t hashval = PolicyTraits::apply(HashElement{hash_ref()}, - PolicyTraits::element(old_slots + i)); - auto target = find_first_non_full(hashval); - size_t new_i = target.offset; - set_ctrl(new_i, H2(hashval)); - PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, old_slots + i); - } - } - if (old_capacity) { - SanitizerUnpoisonMemoryRegion(old_slots, - sizeof(slot_type) * old_capacity); - auto layout = MakeLayout(old_capacity); - Deallocate(&alloc_ref(), old_ctrl, - layout.AllocSize()); - } - } - - void drop_deletes_without_resize() PHMAP_ATTRIBUTE_NOINLINE { - assert(IsValidCapacity(capacity_)); - assert(!is_small()); - // Algorithm: - // - mark all DELETED slots as EMPTY - // - mark all FULL slots as DELETED - // - for each slot marked as DELETED - // hash = Hash(element) - // target = find_first_non_full(hash) - // if target is in the same group - // mark slot as FULL - // else if target is EMPTY - // transfer element to target - // mark slot as EMPTY - // mark target as FULL - // else if target is DELETED - // swap current element with target element - // mark target as FULL - // repeat procedure for current slot with moved from element (target) - ConvertDeletedToEmptyAndFullToDeleted(ctrl_, capacity_); - typename std::aligned_storage::type - raw; - slot_type* slot = reinterpret_cast(&raw); - for (size_t i = 0; i != capacity_; ++i) { - if (!IsDeleted(ctrl_[i])) continue; - size_t hashval = PolicyTraits::apply(HashElement{hash_ref()}, - PolicyTraits::element(slots_ + i)); - auto target = find_first_non_full(hashval); - size_t new_i = target.offset; - - // Verify if the old and new i fall within the same group wrt the hashval. - // If they do, we don't need to move the object as it falls already in the - // best probe we can. - const auto probe_index = [&](size_t pos) { - return ((pos - probe(hashval).offset()) & capacity_) / Group::kWidth; - }; - - // Element doesn't move. - if (PHMAP_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) { - set_ctrl(i, H2(hashval)); - continue; - } - if (IsEmpty(ctrl_[new_i])) { - // Transfer element to the empty spot. - // set_ctrl poisons/unpoisons the slots so we have to call it at the - // right time. - set_ctrl(new_i, H2(hashval)); - PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slots_ + i); - set_ctrl(i, kEmpty); - } else { - assert(IsDeleted(ctrl_[new_i])); - set_ctrl(new_i, H2(hashval)); - // Until we are done rehashing, DELETED marks previously FULL slots. - // Swap i and new_i elements. - PolicyTraits::transfer(&alloc_ref(), slot, slots_ + i); - PolicyTraits::transfer(&alloc_ref(), slots_ + i, slots_ + new_i); - PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slot); - --i; // repeat - } - } - reset_growth_left(capacity_); - } - - void rehash_and_grow_if_necessary() { - if (capacity_ == 0) { - resize(1); - } else if (size() <= CapacityToGrowth(capacity()) / 2) { - // Squash DELETED without growing if there is enough capacity. 
- drop_deletes_without_resize(); - } else { - // Otherwise grow the container. - resize(capacity_ * 2 + 1); - } - } - - bool has_element(const value_type& elem, size_t hashval) const { - auto seq = probe(hashval); - while (true) { - Group g{ctrl_ + seq.offset()}; - for (uint32_t i : g.Match((h2_t)H2(hashval))) { - if (PHMAP_PREDICT_TRUE(PolicyTraits::element(slots_ + seq.offset((size_t)i)) == - elem)) - return true; - } - if (PHMAP_PREDICT_TRUE(g.MatchEmpty())) return false; - seq.next(); - assert(seq.getindex() < capacity_ && "full table!"); - } - return false; - } - - bool has_element(const value_type& elem) const { - size_t hashval = PolicyTraits::apply(HashElement{hash_ref()}, elem); - return has_element(elem, hashval); - } - - // Probes the raw_hash_set with the probe sequence for hash and returns the - // pointer to the first empty or deleted slot. - // NOTE: this function must work with tables having both kEmpty and kDelete - // in one group. Such tables appears during drop_deletes_without_resize. - // - // This function is very useful when insertions happen and: - // - the input is already a set - // - there are enough slots - // - the element with the hash is not in the table - struct FindInfo - { - size_t offset; - size_t probe_length; - }; - FindInfo find_first_non_full(size_t hashval) { - auto seq = probe(hashval); - while (true) { - Group g{ctrl_ + seq.offset()}; - auto mask = g.MatchEmptyOrDeleted(); - if (mask) { - return {seq.offset((size_t)mask.LowestBitSet()), seq.getindex()}; - } - assert(seq.getindex() < capacity_ && "full table!"); - seq.next(); - } - } - - // TODO(alkis): Optimize this assuming *this and that don't overlap. - raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) { - raw_hash_set tmp(std::move(that)); - swap(tmp); - return *this; - } - raw_hash_set& move_assign(raw_hash_set&& that, std::false_type) { - raw_hash_set tmp(std::move(that), alloc_ref()); - swap(tmp); - return *this; - } - -protected: - template - std::pair find_or_prepare_insert(const K& key, size_t hashval) { - auto seq = probe(hashval); - while (true) { - Group g{ctrl_ + seq.offset()}; - for (uint32_t i : g.Match((h2_t)H2(hashval))) { - if (PHMAP_PREDICT_TRUE(PolicyTraits::apply( - EqualElement{key, eq_ref()}, - PolicyTraits::element(slots_ + seq.offset((size_t)i))))) - return {seq.offset((size_t)i), false}; - } - if (PHMAP_PREDICT_TRUE(g.MatchEmpty())) break; - seq.next(); - } - return {prepare_insert(hashval), true}; - } - - template - std::pair find_or_prepare_insert(const K& key) { - return find_or_prepare_insert(key, this->hash(key)); - } - - size_t prepare_insert(size_t hashval) PHMAP_ATTRIBUTE_NOINLINE { - auto target = find_first_non_full(hashval); - if (PHMAP_PREDICT_FALSE(growth_left() == 0 && - !IsDeleted(ctrl_[target.offset]))) { - rehash_and_grow_if_necessary(); - target = find_first_non_full(hashval); - } - ++size_; - growth_left() -= IsEmpty(ctrl_[target.offset]); - set_ctrl(target.offset, H2(hashval)); - infoz_.RecordInsert(hashval, target.probe_length); - return target.offset; - } - - // Constructs the value in the space pointed by the iterator. This only works - // after an unsuccessful find_or_prepare_insert() and before any other - // modifications happen in the raw_hash_set. - // - // PRECONDITION: i is an index returned from find_or_prepare_insert(k), where - // k is the key decomposed from `forward(args)...`, and the bool - // returned by find_or_prepare_insert(k) was true. - // POSTCONDITION: *m.iterator_at(i) == value_type(forward(args)...). 
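Tying `find_first_non_full()` and `prepare_insert()` back to the probing comment near the top of the class: the quadratic schedule `P(i) = (P(i-1) + i) % N` is a triangular-number walk, and for a power-of-two `N` it visits every group exactly once before repeating. A self-contained check:

```
#include <cstdio>
#include <cstddef>

int main() {
    const size_t N = 8;      // number of groups; a power of two
    size_t pos = 5 % N;      // P(0) = H1 % N for some hash
    bool seen[8] = {};
    for (size_t i = 1; i <= N; ++i) {
        seen[pos] = true;    // probe this group
        pos = (pos + i) % N; // P(i) = (P(i-1) + i) % N
    }
    for (bool b : seen) std::printf("%d", b);  // prints 11111111
    std::printf("\n");
}
```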
- template - void emplace_at(size_t i, Args&&... args) { - PolicyTraits::construct(&alloc_ref(), slots_ + i, - std::forward(args)...); - - assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) == - iterator_at(i) && - "constructed value does not match the lookup key"); - } - - iterator iterator_at(size_t i) { return {ctrl_ + i, slots_ + i}; } - const_iterator iterator_at(size_t i) const { return {ctrl_ + i, slots_ + i}; } - -private: - friend struct RawHashSetTestOnlyAccess; - - probe_seq probe(size_t hashval) const { - return probe_seq(H1(hashval, ctrl_), capacity_); - } - - // Reset all ctrl bytes back to kEmpty, except the sentinel. - void reset_ctrl(size_t capacity) { - std::memset(ctrl_, kEmpty, capacity + Group::kWidth); - ctrl_[capacity] = kSentinel; - SanitizerPoisonMemoryRegion(slots_, sizeof(slot_type) * capacity); - } - - void reset_growth_left(size_t capacity) { - growth_left() = CapacityToGrowth(capacity) - size_; - } - - // Sets the control byte, and if `i < Group::kWidth`, set the cloned byte at - // the end too. - void set_ctrl(size_t i, ctrl_t h) { - assert(i < capacity_); - - if (IsFull(h)) { - SanitizerUnpoisonObject(slots_ + i); - } else { - SanitizerPoisonObject(slots_ + i); - } - - ctrl_[i] = h; - ctrl_[((i - Group::kWidth) & capacity_) + 1 + - ((Group::kWidth - 1) & capacity_)] = h; - } - - size_t& growth_left() { return settings_.template get<0>(); } - - template class RefSet, - class M, class P, class H, class E, class A> - friend class parallel_hash_set; - - template class RefSet, - class M, class P, class H, class E, class A> - friend class parallel_hash_map; - - // The representation of the object has two modes: - // - small: For capacities < kWidth-1 - // - large: For the rest. - // - // Differences: - // - In small mode we are able to use the whole capacity. The extra control - // bytes give us at least one "empty" control byte to stop the iteration. - // This is important to make 1 a valid capacity. - // - // - In small mode only the first `capacity()` control bytes after the - // sentinel are valid. The rest contain dummy kEmpty values that do not - // represent a real slot. This is important to take into account on - // find_first_non_full(), where we never try ShouldInsertBackwards() for - // small tables. - bool is_small() const { return capacity_ < Group::kWidth - 1; } - - hasher& hash_ref() { return settings_.template get<1>(); } - const hasher& hash_ref() const { return settings_.template get<1>(); } - key_equal& eq_ref() { return settings_.template get<2>(); } - const key_equal& eq_ref() const { return settings_.template get<2>(); } - allocator_type& alloc_ref() { return settings_.template get<3>(); } - const allocator_type& alloc_ref() const { - return settings_.template get<3>(); - } - - // TODO(alkis): Investigate removing some of these fields: - // - ctrl/slots can be derived from each other - // - size can be moved into the slot array - ctrl_t* ctrl_ = EmptyGroup(); // [(capacity + 1) * ctrl_t] - slot_type* slots_ = nullptr; // [capacity * slot_type] - size_t size_ = 0; // number of full slots - size_t capacity_ = 0; // total number of slots - HashtablezInfoHandle infoz_; - phmap::priv::CompressedTuple - settings_{0, hasher{}, key_equal{}, allocator_type{}}; -}; - - -// -------------------------------------------------------------------------- -// -------------------------------------------------------------------------- -template -class raw_hash_map : public raw_hash_set -{ - // P is Policy. 
It's passed as a template argument to support maps that have - // incomplete types as values, as in unordered_map. - // MappedReference<> may be a non-reference type. - template - using MappedReference = decltype(P::value( - std::addressof(std::declval()))); - - // MappedConstReference<> may be a non-reference type. - template - using MappedConstReference = decltype(P::value( - std::addressof(std::declval()))); - - using KeyArgImpl = - KeyArg::value && IsTransparent::value>; - - using Base = raw_hash_set; - -public: - using key_type = typename Policy::key_type; - using mapped_type = typename Policy::mapped_type; - template - using key_arg = typename KeyArgImpl::template type; - - static_assert(!std::is_reference::value, ""); - - // TODO(b/187807849): Evaluate whether to support reference mapped_type and - // remove this assertion if/when it is supported. - static_assert(!std::is_reference::value, ""); - - using iterator = typename raw_hash_map::raw_hash_set::iterator; - using const_iterator = typename raw_hash_map::raw_hash_set::const_iterator; - - raw_hash_map() {} - using Base::raw_hash_set; // use raw_hash_set constructor - - // The last two template parameters ensure that both arguments are rvalues - // (lvalue arguments are handled by the overloads below). This is necessary - // for supporting bitfield arguments. - // - // union { int n : 1; }; - // flat_hash_map m; - // m.insert_or_assign(n, n); - template - std::pair insert_or_assign(key_arg&& k, V&& v) { - return insert_or_assign_impl(std::forward(k), std::forward(v)); - } - - template - std::pair insert_or_assign(key_arg&& k, const V& v) { - return insert_or_assign_impl(std::forward(k), v); - } - - template - std::pair insert_or_assign(const key_arg& k, V&& v) { - return insert_or_assign_impl(k, std::forward(v)); - } - - template - std::pair insert_or_assign(const key_arg& k, const V& v) { - return insert_or_assign_impl(k, v); - } - - template - iterator insert_or_assign(const_iterator, key_arg&& k, V&& v) { - return insert_or_assign(std::forward(k), std::forward(v)).first; - } - - template - iterator insert_or_assign(const_iterator, key_arg&& k, const V& v) { - return insert_or_assign(std::forward(k), v).first; - } - - template - iterator insert_or_assign(const_iterator, const key_arg& k, V&& v) { - return insert_or_assign(k, std::forward(v)).first; - } - - template - iterator insert_or_assign(const_iterator, const key_arg& k, const V& v) { - return insert_or_assign(k, v).first; - } - - template ::value, int>::type = 0, - K* = nullptr> - std::pair try_emplace(key_arg&& k, Args&&... args) { - return try_emplace_impl(std::forward(k), std::forward(args)...); - } - - template ::value, int>::type = 0> - std::pair try_emplace(const key_arg& k, Args&&... args) { - return try_emplace_impl(k, std::forward(args)...); - } - - template - iterator try_emplace(const_iterator, key_arg&& k, Args&&... args) { - return try_emplace(std::forward(k), std::forward(args)...).first; - } - - template - iterator try_emplace(const_iterator, const key_arg& k, Args&&... args) { - return try_emplace(k, std::forward(args)...).first; - } - - template - MappedReference
-    at(const key_arg<K>& key) {
-        auto it = this->find(key);
-        if (it == this->end())
-            phmap::base_internal::ThrowStdOutOfRange("phmap at(): lookup non-existent key");
-        return Policy::value(&*it);
-    }
-
-    template <class K = key_type, class P = Policy>
-    MappedConstReference<P> at(const key_arg<K>& key) const {
-        auto it = this->find(key);
-        if (it == this->end())
-            phmap::base_internal::ThrowStdOutOfRange("phmap at(): lookup non-existent key");
-        return Policy::value(&*it);
-    }
-
-    template <class K = key_type, class P = Policy>
-    MappedReference<P> operator[](key_arg<K>&& key) {
-        return Policy::value(&*try_emplace(std::forward<K>(key)).first);
-    }
-
-    template <class K = key_type, class P = Policy>
-    MappedReference<P>
operator[](const key_arg& key) { - return Policy::value(&*try_emplace(key).first); - } - -private: - template - std::pair insert_or_assign_impl(K&& k, V&& v) { - auto res = this->find_or_prepare_insert(k); - if (res.second) - this->emplace_at(res.first, std::forward(k), std::forward(v)); - else - Policy::value(&*this->iterator_at(res.first)) = std::forward(v); - return {this->iterator_at(res.first), res.second}; - } - - template - std::pair try_emplace_impl(K&& k, Args&&... args) { - auto res = this->find_or_prepare_insert(k); - if (res.second) - this->emplace_at(res.first, std::piecewise_construct, - std::forward_as_tuple(std::forward(k)), - std::forward_as_tuple(std::forward(args)...)); - return {this->iterator_at(res.first), res.second}; - } -}; - -// ---------------------------------------------------------------------------- -// ---------------------------------------------------------------------------- -// Returns "random" seed. -inline size_t RandomSeed() -{ -#if PHMAP_HAVE_THREAD_LOCAL - static thread_local size_t counter = 0; - size_t value = ++counter; -#else // PHMAP_HAVE_THREAD_LOCAL - static std::atomic counter(0); - size_t value = counter.fetch_add(1, std::memory_order_relaxed); -#endif // PHMAP_HAVE_THREAD_LOCAL - return value ^ static_cast(reinterpret_cast(&counter)); -} - -// ---------------------------------------------------------------------------- -// ---------------------------------------------------------------------------- -template class RefSet, - class Mtx_, - class Policy, class Hash, class Eq, class Alloc> -class parallel_hash_set -{ - using PolicyTraits = hash_policy_traits; - using KeyArgImpl = - KeyArg::value && IsTransparent::value>; - - static_assert(N <= 12, "N = 12 means 4096 hash tables!"); - constexpr static size_t num_tables = 1 << N; - constexpr static size_t mask = num_tables - 1; - -public: - using EmbeddedSet = RefSet; - using EmbeddedIterator= typename EmbeddedSet::iterator; - using EmbeddedConstIterator= typename EmbeddedSet::const_iterator; - using constructor = typename EmbeddedSet::constructor; - using init_type = typename PolicyTraits::init_type; - using key_type = typename PolicyTraits::key_type; - using slot_type = typename PolicyTraits::slot_type; - using allocator_type = Alloc; - using size_type = size_t; - using difference_type = ptrdiff_t; - using hasher = Hash; - using key_equal = Eq; - using policy_type = Policy; - using value_type = typename PolicyTraits::value_type; - using reference = value_type&; - using const_reference = const value_type&; - using pointer = typename phmap::allocator_traits< - allocator_type>::template rebind_traits::pointer; - using const_pointer = typename phmap::allocator_traits< - allocator_type>::template rebind_traits::const_pointer; - - // Alias used for heterogeneous lookup functions. - // `key_arg` evaluates to `K` when the functors are transparent and to - // `key_type` otherwise. It permits template argument deduction on `K` for the - // transparent case. 
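For a concrete sense of the `N` template parameter introduced above: the container is sharded into `1 << N` submaps, each an `EmbeddedSet` guarded by its own lock, and the `key_arg` alias restated just below enables the transparent-lookup behavior this comment describes. A minimal sketch, assuming the public `phmap::parallel_flat_hash_map` wrapper this library declares elsewhere; the `Map` alias and the spelled-out defaults are illustrative:

```
#include <mutex>
#include <string>
#include <parallel_hashmap/phmap.h>

int main() {
    // N = 4 shards the table into 1 << 4 = 16 submaps; writers that land in
    // different submaps do not contend, since each holds its own std::mutex.
    using Map = phmap::parallel_flat_hash_map<
        std::string, int,
        phmap::priv::hash_default_hash<std::string>,
        phmap::priv::hash_default_eq<std::string>,
        std::allocator<std::pair<const std::string, int>>,
        4,            // N: log2 of the submap count
        std::mutex>;  // Mtx_: the lock type protecting each submap

    Map m;
    m.emplace("abc", 1);  // hashed, then routed to sets_[subidx(hashval)]
    return 0;
}
```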
- // -------------------------------------------------------------------- - template - using key_arg = typename KeyArgImpl::template type; - -protected: - using Lockable = phmap::LockableImpl; - - // -------------------------------------------------------------------- - struct Inner : public Lockable - { - struct Params - { - size_t bucket_cnt; - const hasher& hashfn; - const key_equal& eq; - const allocator_type& alloc; - }; - - Inner() {} - - Inner(Params const &p) : set_(p.bucket_cnt, p.hashfn, p.eq, p.alloc) - {} - - bool operator==(const Inner& o) const - { - typename Lockable::SharedLocks l(const_cast(*this), const_cast(o)); - return set_ == o.set_; - } - - EmbeddedSet set_; - }; - -private: - // Give an early error when key_type is not hashable/eq. - // -------------------------------------------------------------------- - auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k)); - auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k)); - - using AllocTraits = phmap::allocator_traits; - - static_assert(std::is_lvalue_reference::value, - "Policy::element() must return a reference"); - - template - struct SameAsElementReference : std::is_same< - typename std::remove_cv::type>::type, - typename std::remove_cv::type>::type> {}; - - // An enabler for insert(T&&): T must be convertible to init_type or be the - // same as [cv] value_type [ref]. - // Note: we separate SameAsElementReference into its own type to avoid using - // reference unless we need to. MSVC doesn't seem to like it in some - // cases. - // -------------------------------------------------------------------- - template - using RequiresInsertable = typename std::enable_if< - phmap::disjunction, - SameAsElementReference>::value, - int>::type; - - // RequiresNotInit is a workaround for gcc prior to 7.1. - // See https://godbolt.org/g/Y4xsUh. 
- template - using RequiresNotInit = - typename std::enable_if::value, int>::type; - - template - using IsDecomposable = IsDecomposable; - -public: - static_assert(std::is_same::value, - "Allocators with custom pointer types are not supported"); - static_assert(std::is_same::value, - "Allocators with custom pointer types are not supported"); - - // --------------------- i t e r a t o r ------------------------------ - class iterator - { - friend class parallel_hash_set; - - public: - using iterator_category = std::forward_iterator_tag; - using value_type = typename parallel_hash_set::value_type; - using reference = - phmap::conditional_t; - using pointer = phmap::remove_reference_t*; - using difference_type = typename parallel_hash_set::difference_type; - using Inner = typename parallel_hash_set::Inner; - using EmbeddedSet = typename parallel_hash_set::EmbeddedSet; - using EmbeddedIterator = typename EmbeddedSet::iterator; - - iterator() {} - - reference operator*() const { return *it_; } - pointer operator->() const { return &operator*(); } - - iterator& operator++() { - assert(inner_); // null inner means we are already at the end - ++it_; - skip_empty(); - return *this; - } - - iterator operator++(int) { - assert(inner_); // null inner means we are already at the end - auto tmp = *this; - ++*this; - return tmp; - } - - friend bool operator==(const iterator& a, const iterator& b) { - return a.inner_ == b.inner_ && (!a.inner_ || a.it_ == b.it_); - } - - friend bool operator!=(const iterator& a, const iterator& b) { - return !(a == b); - } - - private: - iterator(Inner *inner, Inner *inner_end, const EmbeddedIterator& it) : - inner_(inner), inner_end_(inner_end), it_(it) { // for begin() and end() - if (inner) - it_end_ = inner->set_.end(); - } - - void skip_empty() { - while (it_ == it_end_) { - ++inner_; - if (inner_ == inner_end_) { - inner_ = nullptr; // marks end() - break; - } - else { - it_ = inner_->set_.begin(); - it_end_ = inner_->set_.end(); - } - } - } - - Inner *inner_ = nullptr; - Inner *inner_end_ = nullptr; - EmbeddedIterator it_, it_end_; - }; - - // --------------------- c o n s t i t e r a t o r ----------------- - class const_iterator - { - friend class parallel_hash_set; - - public: - using iterator_category = typename iterator::iterator_category; - using value_type = typename parallel_hash_set::value_type; - using reference = typename parallel_hash_set::const_reference; - using pointer = typename parallel_hash_set::const_pointer; - using difference_type = typename parallel_hash_set::difference_type; - using Inner = typename parallel_hash_set::Inner; - - const_iterator() {} - // Implicit construction from iterator. 
- const_iterator(iterator i) : iter_(std::move(i)) {} - - reference operator*() const { return *(iter_); } - pointer operator->() const { return iter_.operator->(); } - - const_iterator& operator++() { - ++iter_; - return *this; - } - const_iterator operator++(int) { return iter_++; } - - friend bool operator==(const const_iterator& a, const const_iterator& b) { - return a.iter_ == b.iter_; - } - friend bool operator!=(const const_iterator& a, const const_iterator& b) { - return !(a == b); - } - - private: - const_iterator(const Inner *inner, const Inner *inner_end, const EmbeddedIterator& it) - : iter_(const_cast(inner), - const_cast(inner_end), - const_cast(it)) {} - - iterator iter_; - }; - - using node_type = node_handle, Alloc>; - using insert_return_type = InsertReturnType; - - // ------------------------- c o n s t r u c t o r s ------------------ - - parallel_hash_set() noexcept( - std::is_nothrow_default_constructible::value&& - std::is_nothrow_default_constructible::value&& - std::is_nothrow_default_constructible::value) {} - -#if (__cplusplus >= 201703L || _MSVC_LANG >= 201402) && (defined(_MSC_VER) || defined(__clang__) || (defined(__GNUC__) && __GNUC__ > 6)) - explicit parallel_hash_set(size_t bucket_cnt, - const hasher& hash_param = hasher(), - const key_equal& eq = key_equal(), - const allocator_type& alloc = allocator_type()) : - parallel_hash_set(typename Inner::Params{bucket_cnt, hash_param, eq, alloc}, - phmap::make_index_sequence{}) - {} - - template - parallel_hash_set(typename Inner::Params const &p, - phmap::index_sequence) : sets_{((void)i, p)...} - {} -#else - explicit parallel_hash_set(size_t bucket_cnt, - const hasher& hash_param = hasher(), - const key_equal& eq = key_equal(), - const allocator_type& alloc = allocator_type()) { - for (auto& inner : sets_) - inner.set_ = EmbeddedSet(bucket_cnt / N, hash_param, eq, alloc); - } -#endif - - parallel_hash_set(size_t bucket_cnt, - const hasher& hash_param, - const allocator_type& alloc) - : parallel_hash_set(bucket_cnt, hash_param, key_equal(), alloc) {} - - parallel_hash_set(size_t bucket_cnt, const allocator_type& alloc) - : parallel_hash_set(bucket_cnt, hasher(), key_equal(), alloc) {} - - explicit parallel_hash_set(const allocator_type& alloc) - : parallel_hash_set(0, hasher(), key_equal(), alloc) {} - - template - parallel_hash_set(InputIter first, InputIter last, size_t bucket_cnt = 0, - const hasher& hash_param = hasher(), const key_equal& eq = key_equal(), - const allocator_type& alloc = allocator_type()) - : parallel_hash_set(bucket_cnt, hash_param, eq, alloc) { - insert(first, last); - } - - template - parallel_hash_set(InputIter first, InputIter last, size_t bucket_cnt, - const hasher& hash_param, const allocator_type& alloc) - : parallel_hash_set(first, last, bucket_cnt, hash_param, key_equal(), alloc) {} - - template - parallel_hash_set(InputIter first, InputIter last, size_t bucket_cnt, - const allocator_type& alloc) - : parallel_hash_set(first, last, bucket_cnt, hasher(), key_equal(), alloc) {} - - template - parallel_hash_set(InputIter first, InputIter last, const allocator_type& alloc) - : parallel_hash_set(first, last, 0, hasher(), key_equal(), alloc) {} - - // Instead of accepting std::initializer_list as the first - // argument like std::unordered_set does, we have two overloads - // that accept std::initializer_list and std::initializer_list. - // This is advantageous for performance. - // - // // Turns {"abc", "def"} into std::initializer_list, then copies - // // the strings into the set. 
- // std::unordered_set s = {"abc", "def"}; - // - // // Turns {"abc", "def"} into std::initializer_list, then - // // copies the strings into the set. - // phmap::flat_hash_set s = {"abc", "def"}; - // - // The same trick is used in insert(). - // - // The enabler is necessary to prevent this constructor from triggering where - // the copy constructor is meant to be called. - // - // phmap::flat_hash_set a, b{a}; - // - // RequiresNotInit is a workaround for gcc prior to 7.1. - // -------------------------------------------------------------------- - template = 0, RequiresInsertable = 0> - parallel_hash_set(std::initializer_list init, size_t bucket_cnt = 0, - const hasher& hash_param = hasher(), const key_equal& eq = key_equal(), - const allocator_type& alloc = allocator_type()) - : parallel_hash_set(init.begin(), init.end(), bucket_cnt, hash_param, eq, alloc) {} - - parallel_hash_set(std::initializer_list init, size_t bucket_cnt = 0, - const hasher& hash_param = hasher(), const key_equal& eq = key_equal(), - const allocator_type& alloc = allocator_type()) - : parallel_hash_set(init.begin(), init.end(), bucket_cnt, hash_param, eq, alloc) {} - - template = 0, RequiresInsertable = 0> - parallel_hash_set(std::initializer_list init, size_t bucket_cnt, - const hasher& hash_param, const allocator_type& alloc) - : parallel_hash_set(init, bucket_cnt, hash_param, key_equal(), alloc) {} - - parallel_hash_set(std::initializer_list init, size_t bucket_cnt, - const hasher& hash_param, const allocator_type& alloc) - : parallel_hash_set(init, bucket_cnt, hash_param, key_equal(), alloc) {} - - template = 0, RequiresInsertable = 0> - parallel_hash_set(std::initializer_list init, size_t bucket_cnt, - const allocator_type& alloc) - : parallel_hash_set(init, bucket_cnt, hasher(), key_equal(), alloc) {} - - parallel_hash_set(std::initializer_list init, size_t bucket_cnt, - const allocator_type& alloc) - : parallel_hash_set(init, bucket_cnt, hasher(), key_equal(), alloc) {} - - template = 0, RequiresInsertable = 0> - parallel_hash_set(std::initializer_list init, const allocator_type& alloc) - : parallel_hash_set(init, 0, hasher(), key_equal(), alloc) {} - - parallel_hash_set(std::initializer_list init, - const allocator_type& alloc) - : parallel_hash_set(init, 0, hasher(), key_equal(), alloc) {} - - parallel_hash_set(const parallel_hash_set& that) - : parallel_hash_set(that, AllocTraits::select_on_container_copy_construction( - that.alloc_ref())) {} - - parallel_hash_set(const parallel_hash_set& that, const allocator_type& a) - : parallel_hash_set(0, that.hash_ref(), that.eq_ref(), a) { - for (size_t i=0; i::value&& - std::is_nothrow_copy_constructible::value&& - std::is_nothrow_copy_constructible::value) - : parallel_hash_set(std::move(that), that.alloc_ref()) { - } - - parallel_hash_set(parallel_hash_set&& that, const allocator_type& a) - { - for (size_t i=0; i::is_always_equal::value && - std::is_nothrow_move_assignable::value && - std::is_nothrow_move_assignable::value) { - for (size_t i=0; i(this)->begin(); } - const_iterator end() const { return const_cast(this)->end(); } - const_iterator cbegin() const { return begin(); } - const_iterator cend() const { return end(); } - - bool empty() const { return !size(); } - - size_t size() const { - size_t sz = 0; - for (const auto& inner : sets_) - sz += inner.set_.size(); - return sz; - } - - size_t capacity() const { - size_t c = 0; - for (const auto& inner : sets_) - c += inner.set_.capacity(); - return c; - } - - size_t max_size() const { return 
(std::numeric_limits::max)(); } - - PHMAP_ATTRIBUTE_REINITIALIZES void clear() { - for (auto& inner : sets_) - { - typename Lockable::UniqueLock m(inner); - inner.set_.clear(); - } - } - - // extension - clears only soecified submap - // ---------------------------------------- - void clear(std::size_t submap_index) { - Inner& inner = sets_[submap_index]; - typename Lockable::UniqueLock m(inner); - inner.set_.clear(); - } - - // This overload kicks in when the argument is an rvalue of insertable and - // decomposable type other than init_type. - // - // flat_hash_map m; - // m.insert(std::make_pair("abc", 42)); - // -------------------------------------------------------------------- - template = 0, - typename std::enable_if::value, int>::type = 0, - T* = nullptr> - std::pair insert(T&& value) { - return emplace(std::forward(value)); - } - - // This overload kicks in when the argument is a bitfield or an lvalue of - // insertable and decomposable type. - // - // union { int n : 1; }; - // flat_hash_set s; - // s.insert(n); - // - // flat_hash_set s; - // const char* p = "hello"; - // s.insert(p); - // - // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace - // RequiresInsertable with RequiresInsertable. - // We are hitting this bug: https://godbolt.org/g/1Vht4f. - // -------------------------------------------------------------------- - template < - class T, RequiresInsertable = 0, - typename std::enable_if::value, int>::type = 0> - std::pair insert(const T& value) { - return emplace(value); - } - - // This overload kicks in when the argument is an rvalue of init_type. Its - // purpose is to handle brace-init-list arguments. - // - // flat_hash_set> s; - // s.insert({"abc", 42}); - // -------------------------------------------------------------------- - std::pair insert(init_type&& value) { - return emplace(std::move(value)); - } - - template = 0, - typename std::enable_if::value, int>::type = 0, - T* = nullptr> - iterator insert(const_iterator, T&& value) { - return insert(std::forward(value)).first; - } - - // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace - // RequiresInsertable with RequiresInsertable. - // We are hitting this bug: https://godbolt.org/g/1Vht4f. - // -------------------------------------------------------------------- - template < - class T, RequiresInsertable = 0, - typename std::enable_if::value, int>::type = 0> - iterator insert(const_iterator, const T& value) { - return insert(value).first; - } - - iterator insert(const_iterator, init_type&& value) { - return insert(std::move(value)).first; - } - - template - void insert(InputIt first, InputIt last) { - for (; first != last; ++first) insert(*first); - } - - template = 0, RequiresInsertable = 0> - void insert(std::initializer_list ilist) { - insert(ilist.begin(), ilist.end()); - } - - void insert(std::initializer_list ilist) { - insert(ilist.begin(), ilist.end()); - } - - insert_return_type insert(node_type&& node) { - if (!node) - return {end(), false, node_type()}; - auto& key = node.key(); - size_t hashval = this->hash(key); - Inner& inner = sets_[subidx(hashval)]; - auto& set = inner.set_; - - typename Lockable::UniqueLock m(inner); - auto res = set.insert(std::move(node), hashval); - return { make_iterator(&inner, res.position), - res.inserted, - res.inserted ? node_type() : std::move(res.node) }; - } - - iterator insert(const_iterator, node_type&& node) { - return insert(std::move(node)).first; - } - - struct ReturnKey_ - { - template - Key operator()(Key&& k, const Args&...) 
const { - return std::forward(k); - } - }; - - // -------------------------------------------------------------------- - // phmap extension: emplace_with_hash - // ---------------------------------- - // same as emplace, but hashval is provided - // -------------------------------------------------------------------- - template - std::pair emplace_decomposable_with_hash(const K& key, size_t hashval, Args&&... args) - { - Inner& inner = sets_[subidx(hashval)]; - auto& set = inner.set_; - typename Lockable::UniqueLock m(inner); - return make_rv(&inner, set.emplace_decomposable(key, hashval, std::forward(args)...)); - } - - struct EmplaceDecomposableHashval - { - template - std::pair operator()(const K& key, Args&&... args) const { - return s.emplace_decomposable_with_hash(key, hashval, std::forward(args)...); - } - parallel_hash_set& s; - size_t hashval; - }; - - // This overload kicks in if we can deduce the key from args. This enables us - // to avoid constructing value_type if an entry with the same key already - // exists. - // - // For example: - // - // flat_hash_map m = {{"abc", "def"}}; - // // Creates no std::string copies and makes no heap allocations. - // m.emplace("abc", "xyz"); - // -------------------------------------------------------------------- - template ::value, int>::type = 0> - std::pair emplace_with_hash(size_t hashval, Args&&... args) { - return PolicyTraits::apply(EmplaceDecomposableHashval{*this, hashval}, - std::forward(args)...); - } - - // This overload kicks in if we cannot deduce the key from args. It constructs - // value_type unconditionally and then either moves it into the table or - // destroys. - // -------------------------------------------------------------------- - template ::value, int>::type = 0> - std::pair emplace_with_hash(size_t hashval, Args&&... args) { - typename std::aligned_storage::type raw; - slot_type* slot = reinterpret_cast(&raw); - - PolicyTraits::construct(&alloc_ref(), slot, std::forward(args)...); - const auto& elem = PolicyTraits::element(slot); - Inner& inner = sets_[subidx(hashval)]; - auto& set = inner.set_; - typename Lockable::UniqueLock m(inner); - typename EmbeddedSet::template InsertSlotWithHash f { - inner, std::move(*slot), hashval}; - return make_rv(PolicyTraits::apply(f, elem)); - } - - template - iterator emplace_hint_with_hash(size_t hashval, const_iterator, Args&&... args) { - return emplace_with_hash(hashval, std::forward(args)...).first; - } - - template - iterator lazy_emplace_with_hash(const key_arg& key, size_t hashval, F&& f) { - Inner& inner = sets_[subidx(hashval)]; - auto& set = inner.set_; - typename Lockable::UniqueLock m(inner); - return make_iterator(&inner, set.lazy_emplace_with_hash(key, hashval, std::forward(f))); - } - - // -------------------------------------------------------------------- - // end of phmap expension - // -------------------------------------------------------------------- - - template - std::pair emplace_decomposable(const K& key, Args&&... args) - { - size_t hashval = this->hash(key); - Inner& inner = sets_[subidx(hashval)]; - auto& set = inner.set_; - typename Lockable::UniqueLock m(inner); - return make_rv(&inner, set.emplace_decomposable(key, hashval, std::forward(args)...)); - } - - struct EmplaceDecomposable - { - template - std::pair operator()(const K& key, Args&&... args) const { - return s.emplace_decomposable(key, std::forward(args)...); - } - parallel_hash_set& s; - }; - - // This overload kicks in if we can deduce the key from args. 
This enables us - // to avoid constructing value_type if an entry with the same key already - // exists. - // - // For example: - // - // flat_hash_map m = {{"abc", "def"}}; - // // Creates no std::string copies and makes no heap allocations. - // m.emplace("abc", "xyz"); - // -------------------------------------------------------------------- - template ::value, int>::type = 0> - std::pair emplace(Args&&... args) { - return PolicyTraits::apply(EmplaceDecomposable{*this}, - std::forward(args)...); - } - - // This overload kicks in if we cannot deduce the key from args. It constructs - // value_type unconditionally and then either moves it into the table or - // destroys. - // -------------------------------------------------------------------- - template ::value, int>::type = 0> - std::pair emplace(Args&&... args) { - typename std::aligned_storage::type raw; - slot_type* slot = reinterpret_cast(&raw); - size_t hashval = this->hash(PolicyTraits::key(slot)); - - PolicyTraits::construct(&alloc_ref(), slot, std::forward(args)...); - const auto& elem = PolicyTraits::element(slot); - Inner& inner = sets_[subidx(hashval)]; - auto& set = inner.set_; - typename Lockable::UniqueLock m(inner); - typename EmbeddedSet::template InsertSlotWithHash f { - inner, std::move(*slot), hashval}; - return make_rv(PolicyTraits::apply(f, elem)); - } - - template - iterator emplace_hint(const_iterator, Args&&... args) { - return emplace(std::forward(args)...).first; - } - - iterator make_iterator(Inner* inner, const EmbeddedIterator it) - { - if (it == inner->set_.end()) - return iterator(); - return iterator(inner, &sets_[0] + num_tables, it); - } - - std::pair make_rv(Inner* inner, - const std::pair& res) - { - return {iterator(inner, &sets_[0] + num_tables, res.first), res.second}; - } - - // lazy_emplace - // ------------ - template - iterator lazy_emplace(const key_arg& key, F&& f) { - auto hashval = this->hash(key); - Inner& inner = sets_[subidx(hashval)]; - auto& set = inner.set_; - typename Lockable::UniqueLock m(inner); - return make_iterator(&inner, set.lazy_emplace_with_hash(key, hashval, std::forward(f))); - } - - // emplace_single - // -------------- - template - void emplace_single_with_hash(const key_arg& key, size_t hashval, F&& f) { - Inner& inner = sets_[subidx(hashval)]; - auto& set = inner.set_; - typename Lockable::UniqueLock m(inner); - set.emplace_single_with_hash(key, hashval, std::forward(f)); - } - - template - void emplace_single(const key_arg& key, F&& f) { - auto hashval = this->hash(key); - emplace_single_with_hash(key, hashval, std::forward(f)); - } - - // if set contains key, lambda is called with the value_type (under read lock protection), - // and if_contains returns true. This is a const API and lambda should not modify the value - // ----------------------------------------------------------------------------------------- - template - bool if_contains(const key_arg& key, F&& f) const { - return const_cast(this)->template - modify_if_impl(key, std::forward(f)); - } - - // if set contains key, lambda is called with the value_type without read lock protection, - // and if_contains_unsafe returns true. This is a const API and lambda should not modify the value - // This should be used only if we know that no other thread may be mutating the set at the time. 
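As a usage illustration of `if_contains` documented above (`if_contains_unsafe`, whose body follows, is the same minus the read lock): a sketch with hypothetical contents; the `Map` alias spells out a mutex-protected parallel map and is not part of this header.

```
#include <iostream>
#include <mutex>
#include <string>
#include <parallel_hashmap/phmap.h>

using Map = phmap::parallel_flat_hash_map<
    std::string, int,
    phmap::priv::hash_default_hash<std::string>,
    phmap::priv::hash_default_eq<std::string>,
    std::allocator<std::pair<const std::string, int>>,
    4, std::mutex>;

int main() {
    Map m = {{"a", 1}};
    int out = 0;
    // The lambda runs under the submap's shared (read) lock and must not
    // mutate the value; the return value tells us whether the key was found.
    if (m.if_contains("a", [&](const Map::value_type& v) { out = v.second; }))
        std::cout << "a -> " << out << "\n";
    return 0;
}
```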
- // ----------------------------------------------------------------------------------------- - template - bool if_contains_unsafe(const key_arg& key, F&& f) const { - return const_cast(this)->template - modify_if_impl::DoNothing>(key, std::forward(f)); - } - - // if map contains key, lambda is called with the value_type (under write lock protection), - // and modify_if returns true. This is a non-const API and lambda is allowed to modify the mapped value - // ---------------------------------------------------------------------------------------------------- - template - bool modify_if(const key_arg& key, F&& f) { - return modify_if_impl(key, std::forward(f)); - } - - // ----------------------------------------------------------------------------------------- - template - bool modify_if_impl(const key_arg& key, F&& f) { -#if __cplusplus >= 201703L - static_assert(std::is_invocable::value); -#endif - L m; - auto ptr = this->template find_ptr(key, this->hash(key), m); - if (ptr == nullptr) - return false; - std::forward(f)(*ptr); - return true; - } - - // if map contains key, lambda is called with the mapped value (under write lock protection). - // If the lambda returns true, the key is subsequently erased from the map (the write lock - // is only released after erase). - // returns true if key was erased, false otherwise. - // ---------------------------------------------------------------------------------------------------- - template - bool erase_if(const key_arg& key, F&& f) { - return erase_if_impl(key, std::forward(f)); - } - - template - bool erase_if_impl(const key_arg& key, F&& f) { -#if __cplusplus >= 201703L - static_assert(std::is_invocable::value); -#endif - L m; - auto it = this->template find(key, this->hash(key), m); - if (it == this->end()) return false; - if (std::forward(f)(const_cast(*it))) - { - this->erase(it); - return true; - } - return false; - } - - // if map already contains key, the first lambda is called with the mapped value (under - // write lock protection) and can update the mapped value. - // if map does not contains key, the second lambda is called and it should invoke the - // passed constructor to construct the value - // returns true if key was not already present, false otherwise. 
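Putting `modify_if`, `erase_if`, and the `lazy_emplace_l` overload documented above (and defined just below) side by side; a sketch under the same assumptions as the previous example, with hypothetical keys and values:

```
#include <mutex>
#include <string>
#include <parallel_hashmap/phmap.h>

using Map = phmap::parallel_flat_hash_map<
    std::string, int,
    phmap::priv::hash_default_hash<std::string>,
    phmap::priv::hash_default_eq<std::string>,
    std::allocator<std::pair<const std::string, int>>,
    4, std::mutex>;

int main() {
    Map m = {{"a", 1}};

    // Update in place under the write lock; returns false if "a" is absent.
    m.modify_if("a", [](Map::value_type& v) { v.second += 10; });

    // Erase "a" only if the predicate holds; the lock is kept through erase.
    m.erase_if("a", [](Map::value_type& v) { return v.second > 5; });

    // Update "b" if present, otherwise construct it via the passed constructor.
    m.lazy_emplace_l("b",
        [](Map::value_type& v) { v.second = 2; },             // key exists
        [](const Map::constructor& ctor) { ctor("b", 2); });  // key absent
    return 0;
}
```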
- // --------------------------------------------------------------------------------------- - template - bool lazy_emplace_l(const key_arg& key, FExists&& fExists, FEmplace&& fEmplace) { - typename Lockable::UniqueLock m; - auto res = this->find_or_prepare_insert(key, m); - Inner* inner = std::get<0>(res); - if (std::get<2>(res)) - inner->set_.lazy_emplace_at(std::get<1>(res), std::forward(fEmplace)); - else { - auto it = this->iterator_at(inner, inner->set_.iterator_at(std::get<1>(res))); - std::forward(fExists)(const_cast(*it)); // in case of the set, non "key" part of value_type can be changed - } - return std::get<2>(res); - } - - // Extension API: support iterating over all values - // - // flat_hash_set s; - // s.insert(...); - // s.for_each([](auto const & key) { - // // Safely iterates over all the keys - // }); - template - void for_each(F&& fCallback) const { - for (auto const& inner : sets_) { - typename Lockable::SharedLock m(const_cast(inner)); - std::for_each(inner.set_.begin(), inner.set_.end(), fCallback); - } - } - - // this version allows to modify the values - void for_each_m(std::function && fCallback) { - for (auto& inner : sets_) { - typename Lockable::UniqueLock m(const_cast(inner)); - std::for_each(inner.set_.begin(), inner.set_.end(), fCallback); - } - } - - // Extension API: support for heterogeneous keys. - // - // std::unordered_set s; - // // Turns "abc" into std::string. - // s.erase("abc"); - // - // flat_hash_set s; - // // Uses "abc" directly without copying it into std::string. - // s.erase("abc"); - // - // -------------------------------------------------------------------- - template - size_type erase(const key_arg& key) { - auto hashval = this->hash(key); - Inner& inner = sets_[subidx(hashval)]; - auto& set = inner.set_; - typename Lockable::UpgradeLock m(inner); - auto it = set.find(key, hashval); - if (it == set.end()) - return 0; - - typename Lockable::UpgradeToUnique unique(m); - set._erase(it); - return 1; - } - - // -------------------------------------------------------------------- - iterator erase(const_iterator cit) { return erase(cit.iter_); } - - // Erases the element pointed to by `it`. Unlike `std::unordered_set::erase`, - // this method returns void to reduce algorithmic complexity to O(1). In - // order to erase while iterating across a map, use the following idiom (which - // also works for standard containers): - // - // for (auto it = m.begin(), end = m.end(); it != end;) { - // if () { - // m._erase(it++); - // } else { - // ++it; - // } - // } - // - // Do not use erase APIs taking iterators when accessing the map concurrently - // -------------------------------------------------------------------- - void _erase(iterator it, bool do_lock = true) { - Inner* inner = it.inner_; - assert(inner != nullptr); - auto& set = inner->set_; - // typename Lockable::UniqueLock m(*inner); // don't lock here - - set._erase(it.it_); - } - void _erase(const_iterator cit) { _erase(cit.iter_); } - - // This overload is necessary because otherwise erase(const K&) would be - // a better match if non-const iterator is passed as an argument. - // Do not use erase APIs taking iterators when accessing the map concurrently - // -------------------------------------------------------------------- - iterator erase(iterator it) { _erase(it++); return it; } - - iterator erase(const_iterator first, const_iterator last) { - while (first != last) { - _erase(first++); - } - return last.iter_; - } - - // Moves elements from `src` into `this`. 
- // If the element already exists in `this`, it is left unmodified in `src`. - // Do not use erase APIs taking iterators when accessing the map concurrently - // -------------------------------------------------------------------- - template - void merge(parallel_hash_set& src) { // NOLINT - assert(this != &src); - if (this != &src) - { - for (size_t i=0; i - void merge(parallel_hash_set&& src) { - merge(src); - } - - node_type extract(const_iterator position) { - return position.iter_.inner_->set_.extract(EmbeddedConstIterator(position.iter_.it_)); - } - - template < - class K = key_type, - typename std::enable_if::value, int>::type = 0> - node_type extract(const key_arg& key) { - auto it = find(key); - return it == end() ? node_type() : extract(const_iterator{it}); - } - - void swap(parallel_hash_set& that) noexcept( - IsNoThrowSwappable() && - (!AllocTraits::propagate_on_container_swap::value || - IsNoThrowSwappable())) { - using std::swap; - for (size_t i=0; i target ? normalized : target); - } - - // Extension API: support for heterogeneous keys. - // - // std::unordered_set s; - // // Turns "abc" into std::string. - // s.count("abc"); - // - // ch_set s; - // // Uses "abc" directly without copying it into std::string. - // s.count("abc"); - // -------------------------------------------------------------------- - template - size_t count(const key_arg& key) const { - return find(key) == end() ? 0 : 1; - } - - // Issues CPU prefetch instructions for the memory needed to find or insert - // a key. Like all lookup functions, this support heterogeneous keys. - // - // NOTE: This is a very low level operation and should not be used without - // specific benchmarks indicating its importance. - // -------------------------------------------------------------------- - void prefetch_hash(size_t hashval) const { - const Inner& inner = sets_[subidx(hashval)]; - const auto& set = inner.set_; - typename Lockable::SharedLock m(const_cast(inner)); - set.prefetch_hash(hashval); - } - - template - void prefetch(const key_arg& key) const { - prefetch_hash(this->hash(key)); - } - - // The API of find() has two extensions. - // - // 1. The hash can be passed by the user. It must be equal to the hash of the - // key. - // - // 2. The type of the key argument doesn't have to be key_type. This is so - // called heterogeneous key support. 
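The two `find()` extensions listed above in use (the overloads follow below); a sketch with hypothetical contents, under the same `Map` alias assumptions as earlier:

```
#include <mutex>
#include <string>
#include <parallel_hashmap/phmap.h>

using Map = phmap::parallel_flat_hash_map<
    std::string, int,
    phmap::priv::hash_default_hash<std::string>,
    phmap::priv::hash_default_eq<std::string>,
    std::allocator<std::pair<const std::string, int>>,
    4, std::mutex>;

int main() {
    Map m = {{"abc", 1}};

    // Extension 1: a precomputed hash. It must come from the hash() member,
    // which applies the internal phmap_mix, not from hash_function()(key).
    size_t hashval = m.hash("abc");
    auto it = m.find("abc", hashval);

    // Extension 2: heterogeneous keys. The default string hash/eq are
    // transparent, so a const char* lookup builds no std::string temporary.
    bool present = m.contains("abc", hashval);
    (void)it; (void)present;
    return 0;
}
```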
- // -------------------------------------------------------------------- - template - iterator find(const key_arg& key, size_t hashval) { - typename Lockable::SharedLock m; - return find(key, hashval, m); - } - - template - iterator find(const key_arg& key) { - return find(key, this->hash(key)); - } - - template - const_iterator find(const key_arg& key, size_t hashval) const { - return const_cast(this)->find(key, hashval); - } - - template - const_iterator find(const key_arg& key) const { - return find(key, this->hash(key)); - } - - template - bool contains(const key_arg& key) const { - return find(key) != end(); - } - - template - bool contains(const key_arg& key, size_t hashval) const { - return find(key, hashval) != end(); - } - - template - std::pair equal_range(const key_arg& key) { - auto it = find(key); - if (it != end()) return {it, std::next(it)}; - return {it, it}; - } - - template - std::pair equal_range( - const key_arg& key) const { - auto it = find(key); - if (it != end()) return {it, std::next(it)}; - return {it, it}; - } - - size_t bucket_count() const { - size_t sz = 0; - for (const auto& inner : sets_) - { - typename Lockable::SharedLock m(const_cast(inner)); - sz += inner.set_.bucket_count(); - } - return sz; - } - - float load_factor() const { - size_t _capacity = bucket_count(); - return _capacity ? static_cast(static_cast(size()) / _capacity) : 0; - } - - float max_load_factor() const { return 1.0f; } - void max_load_factor(float) { - // Does nothing. - } - - hasher hash_function() const { return hash_ref(); } // warning: doesn't match internal hash - use hash() member function - key_equal key_eq() const { return eq_ref(); } - allocator_type get_allocator() const { return alloc_ref(); } - - friend bool operator==(const parallel_hash_set& a, const parallel_hash_set& b) { - return std::equal(a.sets_.begin(), a.sets_.end(), b.sets_.begin()); - } - - friend bool operator!=(const parallel_hash_set& a, const parallel_hash_set& b) { - return !(a == b); - } - - friend void swap(parallel_hash_set& a, - parallel_hash_set& b) noexcept(noexcept(a.swap(b))) { - a.swap(b); - } - - template - size_t hash(const K& key) const { - return HashElement{hash_ref()}(key); - } - -#if !defined(PHMAP_NON_DETERMINISTIC) - template - bool phmap_dump(OutputArchive& ar) const; - - template - bool phmap_load(InputArchive& ar); -#endif - -private: - template - friend struct phmap::priv::hashtable_debug_internal::HashtableDebugAccess; - - struct FindElement - { - template - const_iterator operator()(const K& key, Args&&...) const { - return s.find(key); - } - const parallel_hash_set& s; - }; - - struct HashElement - { - template - size_t operator()(const K& key, Args&&...) const { - return phmap_mix()(h(key)); - } - const hasher& h; - }; - - template - struct EqualElement - { - template - bool operator()(const K2& lhs, Args&&...) const { - return eq(lhs, rhs); - } - const K1& rhs; - const key_equal& eq; - }; - - // "erases" the object from the container, except that it doesn't actually - // destroy the object. It only updates all the metadata of the class. - // This can be used in conjunction with Policy::transfer to move the object to - // another place. 
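For intuition about how a mixed hash value is routed to a submap, see `subidx()` defined just below; here is a standalone restatement of that computation with `N = 4` assumed for concreteness (`kMask` and the sample value are illustrative):

```
#include <cstddef>

// Restates parallel_hash_set::subidx for N = 4, where mask = (1 << 4) - 1:
// three byte-shifted copies of the (already phmap_mix'ed) hash are xor-ed,
// then masked down to a submap index in [0, 16).
constexpr std::size_t kMask = (std::size_t(1) << 4) - 1;

constexpr std::size_t subidx(std::size_t hashval) {
    return ((hashval >> 8) ^ (hashval >> 16) ^ (hashval >> 24)) & kMask;
}

// 0x010203 ^ 0x0102 ^ 0x01 == 0x010300, and 0x010300 & 0xF == 0.
static_assert(subidx(0x01020304) == 0, "sample routing check");
```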
- // -------------------------------------------------------------------- - void erase_meta_only(const_iterator cit) { - auto &it = cit.iter_; - assert(it.set_ != nullptr); - it.set_.erase_meta_only(const_iterator(it.it_)); - } - - void drop_deletes_without_resize() PHMAP_ATTRIBUTE_NOINLINE { - for (auto& inner : sets_) - { - typename Lockable::UniqueLock m(inner); - inner.set_.drop_deletes_without_resize(); - } - } - - bool has_element(const value_type& elem) const { - size_t hashval = PolicyTraits::apply(HashElement{hash_ref()}, elem); - Inner& inner = sets_[subidx(hashval)]; - auto& set = inner.set_; - typename Lockable::SharedLock m(const_cast(inner)); - return set.has_element(elem, hashval); - } - - // TODO(alkis): Optimize this assuming *this and that don't overlap. - // -------------------------------------------------------------------- - parallel_hash_set& move_assign(parallel_hash_set&& that, std::true_type) { - parallel_hash_set tmp(std::move(that)); - swap(tmp); - return *this; - } - - parallel_hash_set& move_assign(parallel_hash_set&& that, std::false_type) { - parallel_hash_set tmp(std::move(that), alloc_ref()); - swap(tmp); - return *this; - } - -protected: - template - pointer find_ptr(const key_arg& key, size_t hashval, L& mutexlock) - { - Inner& inner = sets_[subidx(hashval)]; - auto& set = inner.set_; - mutexlock = std::move(L(inner)); - return set.find_ptr(key, hashval); - } - - template - iterator find(const key_arg& key, size_t hashval, L& mutexlock) { - Inner& inner = sets_[subidx(hashval)]; - auto& set = inner.set_; - mutexlock = std::move(L(inner)); - return make_iterator(&inner, set.find(key, hashval)); - } - - template - std::tuple - find_or_prepare_insert_with_hash(size_t hashval, const K& key, typename Lockable::UniqueLock &mutexlock) { - Inner& inner = sets_[subidx(hashval)]; - auto& set = inner.set_; - mutexlock = std::move(typename Lockable::UniqueLock(inner)); - auto p = set.find_or_prepare_insert(key, hashval); // std::pair - return std::make_tuple(&inner, p.first, p.second); - } - - template - std::tuple - find_or_prepare_insert(const K& key, typename Lockable::UniqueLock &mutexlock) { - return find_or_prepare_insert_with_hash(this->hash(key), key, mutexlock); - } - - iterator iterator_at(Inner *inner, - const EmbeddedIterator& it) { - return {inner, &sets_[0] + num_tables, it}; - } - const_iterator iterator_at(Inner *inner, - const EmbeddedIterator& it) const { - return {inner, &sets_[0] + num_tables, it}; - } - - static size_t subidx(size_t hashval) { - return ((hashval >> 8) ^ (hashval >> 16) ^ (hashval >> 24)) & mask; - } - - static size_t subcnt() { - return num_tables; - } - -private: - friend struct RawHashSetTestOnlyAccess; - - size_t growth_left() { - size_t sz = 0; - for (const auto& set : sets_) - sz += set.growth_left(); - return sz; - } - - hasher& hash_ref() { return sets_[0].set_.hash_ref(); } - const hasher& hash_ref() const { return sets_[0].set_.hash_ref(); } - key_equal& eq_ref() { return sets_[0].set_.eq_ref(); } - const key_equal& eq_ref() const { return sets_[0].set_.eq_ref(); } - allocator_type& alloc_ref() { return sets_[0].set_.alloc_ref(); } - const allocator_type& alloc_ref() const { - return sets_[0].set_.alloc_ref(); - } - -protected: // protected in case users want to derive fromm this - std::array sets_; -}; - -// -------------------------------------------------------------------------- -// -------------------------------------------------------------------------- -template class RefSet, - class Mtx_, - class Policy, class 
Hash, class Eq, class Alloc> -class parallel_hash_map : public parallel_hash_set -{ - // P is Policy. It's passed as a template argument to support maps that have - // incomplete types as values, as in unordered_map. - // MappedReference<> may be a non-reference type. - template - using MappedReference = decltype(P::value( - std::addressof(std::declval()))); - - // MappedConstReference<> may be a non-reference type. - template - using MappedConstReference = decltype(P::value( - std::addressof(std::declval()))); - - using KeyArgImpl = - KeyArg::value && IsTransparent::value>; - - using Base = typename parallel_hash_map::parallel_hash_set; - using Lockable = phmap::LockableImpl; - -public: - using key_type = typename Policy::key_type; - using mapped_type = typename Policy::mapped_type; - using value_type = typename Base::value_type; - template - using key_arg = typename KeyArgImpl::template type; - - static_assert(!std::is_reference::value, ""); - // TODO(alkis): remove this assertion and verify that reference mapped_type is - // supported. - static_assert(!std::is_reference::value, ""); - - using iterator = typename parallel_hash_map::parallel_hash_set::iterator; - using const_iterator = typename parallel_hash_map::parallel_hash_set::const_iterator; - - parallel_hash_map() {} - -#ifdef __INTEL_COMPILER - using Base::parallel_hash_set; -#else - using parallel_hash_map::parallel_hash_set::parallel_hash_set; -#endif - - // The last two template parameters ensure that both arguments are rvalues - // (lvalue arguments are handled by the overloads below). This is necessary - // for supporting bitfield arguments. - // - // union { int n : 1; }; - // flat_hash_map m; - // m.insert_or_assign(n, n); - template - std::pair insert_or_assign(key_arg&& k, V&& v) { - return insert_or_assign_impl(std::forward(k), std::forward(v)); - } - - template - std::pair insert_or_assign(key_arg&& k, const V& v) { - return insert_or_assign_impl(std::forward(k), v); - } - - template - std::pair insert_or_assign(const key_arg& k, V&& v) { - return insert_or_assign_impl(k, std::forward(v)); - } - - template - std::pair insert_or_assign(const key_arg& k, const V& v) { - return insert_or_assign_impl(k, v); - } - - template - iterator insert_or_assign(const_iterator, key_arg&& k, V&& v) { - return insert_or_assign(std::forward(k), std::forward(v)).first; - } - - template - iterator insert_or_assign(const_iterator, key_arg&& k, const V& v) { - return insert_or_assign(std::forward(k), v).first; - } - - template - iterator insert_or_assign(const_iterator, const key_arg& k, V&& v) { - return insert_or_assign(k, std::forward(v)).first; - } - - template - iterator insert_or_assign(const_iterator, const key_arg& k, const V& v) { - return insert_or_assign(k, v).first; - } - - template ::value, int>::type = 0, - K* = nullptr> - std::pair try_emplace(key_arg&& k, Args&&... args) { - return try_emplace_impl(std::forward(k), std::forward(args)...); - } - - template ::value, int>::type = 0> - std::pair try_emplace(const key_arg& k, Args&&... args) { - return try_emplace_impl(k, std::forward(args)...); - } - - template - iterator try_emplace(const_iterator, key_arg&& k, Args&&... args) { - return try_emplace(std::forward(k), std::forward(args)...).first; - } - - template - iterator try_emplace(const_iterator, const key_arg& k, Args&&... args) { - return try_emplace(k, std::forward(args)...).first; - } - - template - MappedReference
-    at(const key_arg<K>& key) {
-        auto it = this->find(key);
-        if (it == this->end())
-            phmap::base_internal::ThrowStdOutOfRange("phmap at(): lookup non-existent key");
-        return Policy::value(&*it);
-    }
-
-    template <class K = key_type, class P = Policy>
-    MappedConstReference<P>
at(const key_arg& key) const { - auto it = this->find(key); - if (it == this->end()) - phmap::base_internal::ThrowStdOutOfRange("phmap at(): lookup non-existent key"); - return Policy::value(&*it); - } - - // ----------- phmap extensions -------------------------- - - template ::value, int>::type = 0, - K* = nullptr> - std::pair try_emplace_with_hash(size_t hashval, key_arg&& k, Args&&... args) { - return try_emplace_impl_with_hash(hashval, std::forward(k), std::forward(args)...); - } - - template ::value, int>::type = 0> - std::pair try_emplace_with_hash(size_t hashval, const key_arg& k, Args&&... args) { - return try_emplace_impl_with_hash(hashval, k, std::forward(args)...); - } - - template - iterator try_emplace_with_hash(size_t hashval, const_iterator, key_arg&& k, Args&&... args) { - return try_emplace_with_hash(hashval, std::forward(k), std::forward(args)...).first; - } - - template - iterator try_emplace_with_hash(size_t hashval, const_iterator, const key_arg& k, Args&&... args) { - return try_emplace_with_hash(hashval, k, std::forward(args)...).first; - } - - // if map does not contains key, it is inserted and the mapped value is value-constructed - // with the provided arguments (if any), as with try_emplace. - // if map already contains key, then the lambda is called with the mapped value (under - // write lock protection) and can update the mapped value. - // returns true if key was not already present, false otherwise. - // --------------------------------------------------------------------------------------- - template - bool try_emplace_l(K&& k, F&& f, Args&&... args) { - typename Lockable::UniqueLock m; - auto res = this->find_or_prepare_insert(k, m); - typename Base::Inner *inner = std::get<0>(res); - if (std::get<2>(res)) - inner->set_.emplace_at(std::get<1>(res), std::piecewise_construct, - std::forward_as_tuple(std::forward(k)), - std::forward_as_tuple(std::forward(args)...)); - else { - auto it = this->iterator_at(inner, inner->set_.iterator_at(std::get<1>(res))); - std::forward(f)(const_cast(*it)); // in case of the set, non "key" part of value_type can be changed - } - return std::get<2>(res); - } - - // ----------- end of phmap extensions -------------------------- - - template - MappedReference
-    operator[](key_arg<K>&& key) {
-        return Policy::value(&*try_emplace(std::forward<K>(key)).first);
-    }
-
-    template <class K = key_type, class P = Policy>
-    MappedReference<P>
operator[](const key_arg& key) { - return Policy::value(&*try_emplace(key).first); - } - -private: - - template - std::pair insert_or_assign_impl(K&& k, V&& v) { - typename Lockable::UniqueLock m; - auto res = this->find_or_prepare_insert(k, m); - typename Base::Inner *inner = std::get<0>(res); - if (std::get<2>(res)) - inner->set_.emplace_at(std::get<1>(res), std::forward(k), std::forward(v)); - else - Policy::value(&*inner->set_.iterator_at(std::get<1>(res))) = std::forward(v); - return {this->iterator_at(inner, inner->set_.iterator_at(std::get<1>(res))), - std::get<2>(res)}; - } - - template - std::pair try_emplace_impl(K&& k, Args&&... args) { - typename Lockable::UniqueLock m; - auto res = this->find_or_prepare_insert(k, m); - typename Base::Inner *inner = std::get<0>(res); - if (std::get<2>(res)) - inner->set_.emplace_at(std::get<1>(res), std::piecewise_construct, - std::forward_as_tuple(std::forward(k)), - std::forward_as_tuple(std::forward(args)...)); - return {this->iterator_at(inner, inner->set_.iterator_at(std::get<1>(res))), - std::get<2>(res)}; - } - - template - std::pair try_emplace_impl_with_hash(size_t hashval, K&& k, Args&&... args) { - typename Lockable::UniqueLock m; - auto res = this->find_or_prepare_insert_with_hash(hashval, k, m); - typename Base::Inner *inner = std::get<0>(res); - if (std::get<2>(res)) - inner->set_.emplace_at(std::get<1>(res), std::piecewise_construct, - std::forward_as_tuple(std::forward(k)), - std::forward_as_tuple(std::forward(args)...)); - return {this->iterator_at(inner, inner->set_.iterator_at(std::get<1>(res))), - std::get<2>(res)}; - } - - -}; - - -// Constructs T into uninitialized storage pointed by `ptr` using the args -// specified in the tuple. -// ---------------------------------------------------------------------------- -template -void ConstructFromTuple(Alloc* alloc, T* ptr, Tuple&& t) { - memory_internal::ConstructFromTupleImpl( - alloc, ptr, std::forward(t), - phmap::make_index_sequence< - std::tuple_size::type>::value>()); -} - -// Constructs T using the args specified in the tuple and calls F with the -// constructed value. -// ---------------------------------------------------------------------------- -template -decltype(std::declval()(std::declval())) WithConstructed( - Tuple&& t, F&& f) { - return memory_internal::WithConstructedImpl( - std::forward(t), - phmap::make_index_sequence< - std::tuple_size::type>::value>(), - std::forward(f)); -} - -// ---------------------------------------------------------------------------- -// Given arguments of an std::pair's consructor, PairArgs() returns a pair of -// tuples with references to the passed arguments. The tuples contain -// constructor arguments for the first and the second elements of the pair. -// -// The following two snippets are equivalent. -// -// 1. std::pair p(args...); -// -// 2. 
auto a = PairArgs(args...); -// std::pair p(std::piecewise_construct, -// std::move(p.first), std::move(p.second)); -// ---------------------------------------------------------------------------- -inline std::pair, std::tuple<>> PairArgs() { return {}; } - -template -std::pair, std::tuple> PairArgs(F&& f, S&& s) { - return {std::piecewise_construct, std::forward_as_tuple(std::forward(f)), - std::forward_as_tuple(std::forward(s))}; -} - -template -std::pair, std::tuple> PairArgs( - const std::pair& p) { - return PairArgs(p.first, p.second); -} - -template -std::pair, std::tuple> PairArgs(std::pair&& p) { - return PairArgs(std::forward(p.first), std::forward(p.second)); -} - -template -auto PairArgs(std::piecewise_construct_t, F&& f, S&& s) - -> decltype(std::make_pair(memory_internal::TupleRef(std::forward(f)), - memory_internal::TupleRef(std::forward(s)))) { - return std::make_pair(memory_internal::TupleRef(std::forward(f)), - memory_internal::TupleRef(std::forward(s))); -} - -// A helper function for implementing apply() in map policies. -// ---------------------------------------------------------------------------- -template -auto DecomposePair(F&& f, Args&&... args) - -> decltype(memory_internal::DecomposePairImpl( - std::forward(f), PairArgs(std::forward(args)...))) { - return memory_internal::DecomposePairImpl( - std::forward(f), PairArgs(std::forward(args)...)); -} - -// A helper function for implementing apply() in set policies. -// ---------------------------------------------------------------------------- -template -decltype(std::declval()(std::declval(), std::declval())) -DecomposeValue(F&& f, Arg&& arg) { - const auto& key = arg; - return std::forward(f)(key, std::forward(arg)); -} - - -// -------------------------------------------------------------------------- -// Policy: a policy defines how to perform different operations on -// the slots of the hashtable (see hash_policy_traits.h for the full interface -// of policy). -// -// Hash: a (possibly polymorphic) functor that hashes keys of the hashtable. The -// functor should accept a key and return size_t as hash. For best performance -// it is important that the hash function provides high entropy across all bits -// of the hash. -// -// Eq: a (possibly polymorphic) functor that compares two keys for equality. It -// should accept two (of possibly different type) keys and return a bool: true -// if they are equal, false if they are not. If two keys compare equal, then -// their hash values as defined by Hash MUST be equal. -// -// Allocator: an Allocator [https://devdocs.io/cpp/concept/allocator] with which -// the storage of the hashtable will be allocated and the elements will be -// constructed and destroyed. -// -------------------------------------------------------------------------- -template -struct FlatHashSetPolicy -{ - using slot_type = T; - using key_type = T; - using init_type = T; - using constant_iterators = std::true_type; - - template - static void construct(Allocator* alloc, slot_type* slot, Args&&... 
args) { - phmap::allocator_traits::construct(*alloc, slot, - std::forward(args)...); - } - - template - static void destroy(Allocator* alloc, slot_type* slot) { - phmap::allocator_traits::destroy(*alloc, slot); - } - - template - static void transfer(Allocator* alloc, slot_type* new_slot, - slot_type* old_slot) { - construct(alloc, new_slot, std::move(*old_slot)); - destroy(alloc, old_slot); - } - - static T& element(slot_type* slot) { return *slot; } - - template - static decltype(phmap::priv::DecomposeValue( - std::declval(), std::declval()...)) - apply(F&& f, Args&&... args) { - return phmap::priv::DecomposeValue( - std::forward(f), std::forward(args)...); - } - - static size_t space_used(const T*) { return 0; } -}; - -// -------------------------------------------------------------------------- -// -------------------------------------------------------------------------- -template -struct FlatHashMapPolicy -{ - using slot_policy = priv::map_slot_policy; - using slot_type = typename slot_policy::slot_type; - using key_type = K; - using mapped_type = V; - using init_type = std::pair; - - template - static void construct(Allocator* alloc, slot_type* slot, Args&&... args) { - slot_policy::construct(alloc, slot, std::forward(args)...); - } - - template - static void destroy(Allocator* alloc, slot_type* slot) { - slot_policy::destroy(alloc, slot); - } - - template - static void transfer(Allocator* alloc, slot_type* new_slot, - slot_type* old_slot) { - slot_policy::transfer(alloc, new_slot, old_slot); - } - - template - static decltype(phmap::priv::DecomposePair( - std::declval(), std::declval()...)) - apply(F&& f, Args&&... args) { - return phmap::priv::DecomposePair(std::forward(f), - std::forward(args)...); - } - - static size_t space_used(const slot_type*) { return 0; } - - static std::pair& element(slot_type* slot) { return slot->value; } - - static V& value(std::pair* kv) { return kv->second; } - static const V& value(const std::pair* kv) { return kv->second; } -}; - -template -struct node_hash_policy { - static_assert(std::is_lvalue_reference::value, ""); - - using slot_type = typename std::remove_cv< - typename std::remove_reference::type>::type*; - - template - static void construct(Alloc* alloc, slot_type* slot, Args&&... args) { - *slot = Policy::new_element(alloc, std::forward(args)...); - } - - template - static void destroy(Alloc* alloc, slot_type* slot) { - Policy::delete_element(alloc, *slot); - } - - template - static void transfer(Alloc*, slot_type* new_slot, slot_type* old_slot) { - *new_slot = *old_slot; - } - - static size_t space_used(const slot_type* slot) { - if (slot == nullptr) return Policy::element_space_used(nullptr); - return Policy::element_space_used(*slot); - } - - static Reference element(slot_type* slot) { return **slot; } - - template - static auto value(T* elem) -> decltype(P::value(elem)) { - return P::value(elem); - } - - template - static auto apply(Ts&&... ts) -> decltype(P::apply(std::forward(ts)...)) { - return P::apply(std::forward(ts)...); - } -}; - -// -------------------------------------------------------------------------- -// -------------------------------------------------------------------------- -template -struct NodeHashSetPolicy - : phmap::priv::node_hash_policy> -{ - using key_type = T; - using init_type = T; - using constant_iterators = std::true_type; - - template - static T* new_element(Allocator* alloc, Args&&... 
args) { - using ValueAlloc = - typename phmap::allocator_traits::template rebind_alloc; - ValueAlloc value_alloc(*alloc); - T* res = phmap::allocator_traits::allocate(value_alloc, 1); - phmap::allocator_traits::construct(value_alloc, res, - std::forward(args)...); - return res; - } - - template - static void delete_element(Allocator* alloc, T* elem) { - using ValueAlloc = - typename phmap::allocator_traits::template rebind_alloc; - ValueAlloc value_alloc(*alloc); - phmap::allocator_traits::destroy(value_alloc, elem); - phmap::allocator_traits::deallocate(value_alloc, elem, 1); - } - - template - static decltype(phmap::priv::DecomposeValue( - std::declval(), std::declval()...)) - apply(F&& f, Args&&... args) { - return phmap::priv::DecomposeValue( - std::forward(f), std::forward(args)...); - } - - static size_t element_space_used(const T*) { return sizeof(T); } -}; - -// -------------------------------------------------------------------------- -// -------------------------------------------------------------------------- -template -class NodeHashMapPolicy - : public phmap::priv::node_hash_policy< - std::pair&, NodeHashMapPolicy> -{ - using value_type = std::pair; - -public: - using key_type = Key; - using mapped_type = Value; - using init_type = std::pair; - - template - static value_type* new_element(Allocator* alloc, Args&&... args) { - using PairAlloc = typename phmap::allocator_traits< - Allocator>::template rebind_alloc; - PairAlloc pair_alloc(*alloc); - value_type* res = - phmap::allocator_traits::allocate(pair_alloc, 1); - phmap::allocator_traits::construct(pair_alloc, res, - std::forward(args)...); - return res; - } - - template - static void delete_element(Allocator* alloc, value_type* pair) { - using PairAlloc = typename phmap::allocator_traits< - Allocator>::template rebind_alloc; - PairAlloc pair_alloc(*alloc); - phmap::allocator_traits::destroy(pair_alloc, pair); - phmap::allocator_traits::deallocate(pair_alloc, pair, 1); - } - - template - static decltype(phmap::priv::DecomposePair( - std::declval(), std::declval()...)) - apply(F&& f, Args&&... args) { - return phmap::priv::DecomposePair(std::forward(f), - std::forward(args)...); - } - - static size_t element_space_used(const value_type*) { - return sizeof(value_type); - } - - static Value& value(value_type* elem) { return elem->second; } - static const Value& value(const value_type* elem) { return elem->second; } -}; - - -// -------------------------------------------------------------------------- -// hash_default -// -------------------------------------------------------------------------- - -#if PHMAP_HAVE_STD_STRING_VIEW - -// support char16_t wchar_t .... -template -struct StringHashT -{ - using is_transparent = void; - - size_t operator()(std::basic_string_view v) const { - std::string_view bv{reinterpret_cast(v.data()), v.size() * sizeof(CharT)}; - return std::hash()(bv); - } -}; - -// Supports heterogeneous lookup for basic_string-like elements. 
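A usage sketch of the heterogeneous string lookup these hashers enable (`StringHashEqT`, next, packages the hash with a matching transparent `Eq`); requires C++17 for `std::string_view`, and the map contents are hypothetical:

```
#include <string>
#include <string_view>
#include <parallel_hashmap/phmap.h>

int main() {
    // With PHMAP_HAVE_STD_STRING_VIEW set, the default Hash/Eq for
    // std::string keys are the transparent functors above, so lookups accept
    // anything convertible to std::string_view without building a temporary
    // std::string.
    phmap::flat_hash_map<std::string, int> m = {{"abc", 1}};

    std::string_view sv = "abc";
    bool a = m.contains(sv);     // hashes the characters directly
    bool b = m.contains("abc");  // const char* takes the same path
    (void)a; (void)b;
    return 0;
}
```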
-// Supports heterogeneous lookup for pointers and smart pointers.
-// -------------------------------------------------------------
-template <class T>
-struct HashEq<T*>
-{
-    struct Hash {
-        using is_transparent = void;
-        template <class U>
-        size_t operator()(const U& ptr) const {
-            return phmap::Hash<const T*>{}(HashEq::ToPtr(ptr));
-        }
-    };
-
-    struct Eq {
-        using is_transparent = void;
-        template <class A, class B>
-        bool operator()(const A& a, const B& b) const {
-            return HashEq::ToPtr(a) == HashEq::ToPtr(b);
-        }
-    };
-
-private:
-    static const T* ToPtr(const T* ptr) { return ptr; }
-
-    template <class U, class D>
-    static const T* ToPtr(const std::unique_ptr<U, D>& ptr) {
-        return ptr.get();
-    }
-
-    template <class U>
-    static const T* ToPtr(const std::shared_ptr<U>& ptr) {
-        return ptr.get();
-    }
-};
-
-template <class T, class D>
-struct HashEq<std::unique_ptr<T, D>> : HashEq<T*> {};
-
-template <class T>
-struct HashEq<std::shared_ptr<T>> : HashEq<T*> {};
-
-namespace hashtable_debug_internal {
-
-// --------------------------------------------------------------------------
-// --------------------------------------------------------------------------
-
-template <typename Set, typename = void>
-struct has_member_type_raw_hash_set : std::false_type
-{};
-template <typename Set>
-struct has_member_type_raw_hash_set<Set, phmap::void_t<typename Set::raw_hash_set>> : std::true_type
-{};
-
-template <typename Set>
-struct HashtableDebugAccess<Set, typename std::enable_if<has_member_type_raw_hash_set<Set>::value>::type>
-{
-    using Traits = typename Set::PolicyTraits;
-    using Slot = typename Traits::slot_type;
-
-    static size_t GetNumProbes(const Set& set,
-                               const typename Set::key_type& key) {
-        size_t num_probes = 0;
-        size_t hashval = set.hash(key);
-        auto seq = set.probe(hashval);
-        while (true) {
-            priv::Group g{set.ctrl_ + seq.offset()};
-            for (uint32_t i : g.Match(priv::H2(hashval))) {
-                if (Traits::apply(
-                        typename Set::template EqualElement<typename Set::key_type>{
-                            key, set.eq_ref()},
-                        Traits::element(set.slots_ + seq.offset((size_t)i))))
-                    return num_probes;
-                ++num_probes;
-            }
-            if (g.MatchEmpty()) return num_probes;
-            seq.next();
-            ++num_probes;
-        }
-    }
-
-    static size_t AllocatedByteSize(const Set& c) {
-        size_t capacity = c.capacity_;
-        if (capacity == 0) return 0;
-        auto layout = Set::MakeLayout(capacity);
-        size_t m = layout.AllocSize();
-
-        size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
-        if (per_slot != ~size_t{}) {
-            m += per_slot * c.size();
-        } else {
-            for (size_t i = 0; i != capacity; ++i) {
-                if (priv::IsFull(c.ctrl_[i])) {
-                    m += Traits::space_used(c.slots_ + i);
-                }
-            }
-        }
-        return m;
-    }
-
-    static size_t LowerBoundAllocatedByteSize(size_t size) {
-        size_t capacity = GrowthToLowerboundCapacity(size);
-        if (capacity == 0) return 0;
-        auto layout = Set::MakeLayout(NormalizeCapacity(capacity));
-        size_t m = layout.AllocSize();
-        size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
-        if (per_slot != ~size_t{}) {
-            m += per_slot * size;
-        }
-        return m;
-    }
-};
-
-
-template <typename Set, typename = void>
-struct has_member_type_EmbeddedSet : std::false_type
-{};
-template <typename Set>
-struct has_member_type_EmbeddedSet<Set, phmap::void_t<typename Set::EmbeddedSet>> : std::true_type
-{};
-
-template <typename Set>
-struct HashtableDebugAccess<Set, typename std::enable_if<has_member_type_EmbeddedSet<Set>::value>::type> {
-    using Traits = typename Set::PolicyTraits;
-    using Slot = typename Traits::slot_type;
-    using EmbeddedSet = typename Set::EmbeddedSet;
-
-    static size_t GetNumProbes(const Set& set, const typename Set::key_type& key) {
-        size_t hashval = set.hash(key);
-        auto& inner = set.sets_[set.subidx(hashval)];
-        auto& inner_set = inner.set_;
-        return HashtableDebugAccess<EmbeddedSet>::GetNumProbes(inner_set, key);
-    }
-};
-
-} // namespace hashtable_debug_internal
-} // namespace priv
-
-// -----------------------------------------------------------------------------
-// phmap::flat_hash_set
-// -----------------------------------------------------------------------------
-// A `phmap::flat_hash_set<T>` is an unordered associative container which has
-// been optimized for both speed and memory footprint in most common use cases.
-// Its interface is similar to that of `std::unordered_set<T>`, with the
-// following notable differences:
-//
-// * Supports heterogeneous lookup, through `find()` and `insert()`, provided
-//   that the set is given a compatible heterogeneous hashing function and
-//   equality operator.
-// * Invalidates any references and pointers to elements within the table after
-//   `rehash()`.
-// * Contains a `capacity()` member function indicating the number of element
-//   slots (open, deleted, and empty) within the hash set.
-// * Returns `void` from the `_erase(iterator)` overload.
-// -----------------------------------------------------------------------------
-template <class T, class Hash, class Eq, class Alloc>   // default values in phmap_fwd_decl.h
-class flat_hash_set
-    : public phmap::priv::raw_hash_set<
-          phmap::priv::FlatHashSetPolicy<T>, Hash, Eq, Alloc>
-{
-    using Base = typename flat_hash_set::raw_hash_set;
-
-public:
-    flat_hash_set() {}
-#ifdef __INTEL_COMPILER
-    using Base::raw_hash_set;
-#else
-    using Base::Base;
-#endif
-    using Base::begin;
-    using Base::cbegin;
-    using Base::cend;
-    using Base::end;
-    using Base::capacity;
-    using Base::empty;
-    using Base::max_size;
-    using Base::size;
-    using Base::clear;  // may shrink; to avoid shrinking, use erase(begin(), end())
-    using Base::erase;
-    using Base::insert;
-    using Base::emplace;
-    using Base::emplace_hint;
-    using Base::extract;
-    using Base::merge;
-    using Base::swap;
-    using Base::rehash;
-    using Base::reserve;
-    using Base::contains;
-    using Base::count;
-    using Base::equal_range;
-    using Base::find;
-    using Base::bucket_count;
-    using Base::load_factor;
-    using Base::max_load_factor;
-    using Base::get_allocator;
-    using Base::hash_function;
-    using Base::hash;
-    using Base::key_eq;
-};
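Two of the bullets above matter in practice: `reserve()` sizes the table up front, and a rehash moves elements, so pointers into a flat table must not be held across inserts. A small sketch, under the same include assumptions as above:

```
#include <cstdio>
#include "parallel_hashmap/phmap.h"

int main() {
    phmap::flat_hash_set<int> s;
    s.reserve(100);                        // pre-size: no rehash for 100 inserts
    for (int i = 0; i < 100; ++i) s.insert(i);
    std::printf("size=%zu capacity=%zu\n", s.size(), s.capacity());
    const int* p = &*s.find(0);
    s.insert(1000);                        // may rehash: treat p as dangling from here on
    (void)p;
}
```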
-// -----------------------------------------------------------------------------
-// phmap::flat_hash_map
-// -----------------------------------------------------------------------------
-//
-// A `phmap::flat_hash_map<K, V>` is an unordered associative container which
-// has been optimized for both speed and memory footprint in most common use
-// cases. Its interface is similar to that of `std::unordered_map<K, V>`, with
-// the following notable differences:
-//
-// * Supports heterogeneous lookup, through `find()`, `operator[]()` and
-//   `insert()`, provided that the map is given a compatible heterogeneous
-//   hashing function and equality operator.
-// * Invalidates any references and pointers to elements within the table after
-//   `rehash()`.
-// * Contains a `capacity()` member function indicating the number of element
-//   slots (open, deleted, and empty) within the hash map.
-// * Returns `void` from the `_erase(iterator)` overload.
-// -----------------------------------------------------------------------------
-template <class K, class V, class Hash, class Eq, class Alloc>   // default values in phmap_fwd_decl.h
-class flat_hash_map : public phmap::priv::raw_hash_map<
-                          phmap::priv::FlatHashMapPolicy<K, V>,
-                          Hash, Eq, Alloc> {
-    using Base = typename flat_hash_map::raw_hash_map;
-
-public:
-    flat_hash_map() {}
-#ifdef __INTEL_COMPILER
-    using Base::raw_hash_map;
-#else
-    using Base::Base;
-#endif
-    using Base::begin;
-    using Base::cbegin;
-    using Base::cend;
-    using Base::end;
-    using Base::capacity;
-    using Base::empty;
-    using Base::max_size;
-    using Base::size;
-    using Base::clear;
-    using Base::erase;
-    using Base::insert;
-    using Base::insert_or_assign;
-    using Base::emplace;
-    using Base::emplace_hint;
-    using Base::try_emplace;
-    using Base::extract;
-    using Base::merge;
-    using Base::swap;
-    using Base::rehash;
-    using Base::reserve;
-    using Base::at;
-    using Base::contains;
-    using Base::count;
-    using Base::equal_range;
-    using Base::find;
-    using Base::operator[];
-    using Base::bucket_count;
-    using Base::load_factor;
-    using Base::max_load_factor;
-    using Base::get_allocator;
-    using Base::hash_function;
-    using Base::hash;
-    using Base::key_eq;
-};
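For maps, `operator[]` default-constructs the mapped value on first access, while `try_emplace` only inserts when the key is absent and never overwrites. A sketch:

```
#include <cstdio>
#include <string>
#include "parallel_hashmap/phmap.h"

int main() {
    phmap::flat_hash_map<std::string, int> counts;
    counts["apple"] += 1;              // default-constructs the value, then increments
    counts.try_emplace("apple", 99);   // key exists: no insert, value stays 1
    counts.try_emplace("pear", 42);    // key absent: inserted
    for (const auto& kv : counts)
        std::printf("%s -> %d\n", kv.first.c_str(), kv.second);
}
```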
-// -----------------------------------------------------------------------------
-// phmap::node_hash_set
-// -----------------------------------------------------------------------------
-// A `phmap::node_hash_set<T>` is an unordered associative container which
-// has been optimized for both speed and memory footprint in most common use
-// cases. Its interface is similar to that of `std::unordered_set<T>`, with the
-// following notable differences:
-//
-// * Supports heterogeneous lookup, through `find()` and `insert()`, provided
-//   that the set is given a compatible heterogeneous hashing function and
-//   equality operator.
-// * Contains a `capacity()` member function indicating the number of element
-//   slots (open, deleted, and empty) within the hash set.
-// * Returns `void` from the `erase(iterator)` overload.
-// -----------------------------------------------------------------------------
-template <class T, class Hash, class Eq, class Alloc>   // default values in phmap_fwd_decl.h
-class node_hash_set
-    : public phmap::priv::raw_hash_set<
-          phmap::priv::NodeHashSetPolicy<T>, Hash, Eq, Alloc>
-{
-    using Base = typename node_hash_set::raw_hash_set;
-
-public:
-    node_hash_set() {}
-#ifdef __INTEL_COMPILER
-    using Base::raw_hash_set;
-#else
-    using Base::Base;
-#endif
-    using Base::begin;
-    using Base::cbegin;
-    using Base::cend;
-    using Base::end;
-    using Base::capacity;
-    using Base::empty;
-    using Base::max_size;
-    using Base::size;
-    using Base::clear;
-    using Base::erase;
-    using Base::insert;
-    using Base::emplace;
-    using Base::emplace_hint;
-    using Base::emplace_with_hash;
-    using Base::emplace_hint_with_hash;
-    using Base::extract;
-    using Base::merge;
-    using Base::swap;
-    using Base::rehash;
-    using Base::reserve;
-    using Base::contains;
-    using Base::count;
-    using Base::equal_range;
-    using Base::find;
-    using Base::bucket_count;
-    using Base::load_factor;
-    using Base::max_load_factor;
-    using Base::get_allocator;
-    using Base::hash_function;
-    using Base::hash;
-    using Base::key_eq;
-    typename Base::hasher hash_funct() { return this->hash_function(); }
-    void resize(typename Base::size_type hint) { this->rehash(hint); }
-};
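Unlike the flat variants, the node-based containers allocate each element separately (see `NodeHashSetPolicy::new_element` above), so pointers and references to elements stay valid across rehashes. A sketch:

```
#include <cstdio>
#include <string>
#include "parallel_hashmap/phmap.h"

int main() {
    phmap::node_hash_set<std::string> s;
    const std::string* p = &*s.insert("stable").first;
    for (int i = 0; i < 10000; ++i) s.insert(std::to_string(i));  // forces rehashes
    std::printf("%s\n", p->c_str());  // still valid: the node itself never moved
}
```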
-// -----------------------------------------------------------------------------
-// phmap::node_hash_map
-// -----------------------------------------------------------------------------
-//
-// A `phmap::node_hash_map<Key, Value>` is an unordered associative container
-// which has been optimized for both speed and memory footprint in most common
-// use cases. Its interface is similar to that of `std::unordered_map<Key, Value>`,
-// with the following notable differences:
-//
-// * Supports heterogeneous lookup, through `find()`, `operator[]()` and
-//   `insert()`, provided that the map is given a compatible heterogeneous
-//   hashing function and equality operator.
-// * Contains a `capacity()` member function indicating the number of element
-//   slots (open, deleted, and empty) within the hash map.
-// * Returns `void` from the `erase(iterator)` overload.
-// -----------------------------------------------------------------------------
-template <class Key, class Value, class Hash, class Eq, class Alloc>   // default values in phmap_fwd_decl.h
-class node_hash_map
-    : public phmap::priv::raw_hash_map<
-          phmap::priv::NodeHashMapPolicy<Key, Value>, Hash, Eq,
-          Alloc>
-{
-    using Base = typename node_hash_map::raw_hash_map;
-
-public:
-    node_hash_map() {}
-#ifdef __INTEL_COMPILER
-    using Base::raw_hash_map;
-#else
-    using Base::Base;
-#endif
-    using Base::begin;
-    using Base::cbegin;
-    using Base::cend;
-    using Base::end;
-    using Base::capacity;
-    using Base::empty;
-    using Base::max_size;
-    using Base::size;
-    using Base::clear;
-    using Base::erase;
-    using Base::insert;
-    using Base::insert_or_assign;
-    using Base::emplace;
-    using Base::emplace_hint;
-    using Base::try_emplace;
-    using Base::extract;
-    using Base::merge;
-    using Base::swap;
-    using Base::rehash;
-    using Base::reserve;
-    using Base::at;
-    using Base::contains;
-    using Base::count;
-    using Base::equal_range;
-    using Base::find;
-    using Base::operator[];
-    using Base::bucket_count;
-    using Base::load_factor;
-    using Base::max_load_factor;
-    using Base::get_allocator;
-    using Base::hash_function;
-    using Base::hash;
-    using Base::key_eq;
-    typename Base::hasher hash_funct() { return this->hash_function(); }
-    void resize(typename Base::size_type hint) { this->rehash(hint); }
-};
-
-// -----------------------------------------------------------------------------
-// phmap::parallel_flat_hash_set
-// -----------------------------------------------------------------------------
-template <class T, class Hash, class Eq, class Alloc, size_t N, class Mtx_>   // default values in phmap_fwd_decl.h
-class parallel_flat_hash_set
-    : public phmap::priv::parallel_hash_set<
-          N, phmap::priv::raw_hash_set, Mtx_,
-          phmap::priv::FlatHashSetPolicy<T>,
-          Hash, Eq, Alloc>
-{
-    using Base = typename parallel_flat_hash_set::parallel_hash_set;
-
-public:
-    parallel_flat_hash_set() {}
-#ifdef __INTEL_COMPILER
-    using Base::parallel_hash_set;
-#else
-    using Base::Base;
-#endif
-    using Base::hash;
-    using Base::subidx;
-    using Base::subcnt;
-    using Base::begin;
-    using Base::cbegin;
-    using Base::cend;
-    using Base::end;
-    using Base::capacity;
-    using Base::empty;
-    using Base::max_size;
-    using Base::size;
-    using Base::clear;
-    using Base::erase;
-    using Base::insert;
-    using Base::emplace;
-    using Base::emplace_hint;
-    using Base::emplace_with_hash;
-    using Base::emplace_hint_with_hash;
-    using Base::extract;
-    using Base::merge;
-    using Base::swap;
-    using Base::rehash;
-    using Base::reserve;
-    using Base::contains;
-    using Base::count;
-    using Base::equal_range;
-    using Base::find;
-    using Base::bucket_count;
-    using Base::load_factor;
-    using Base::max_load_factor;
-    using Base::get_allocator;
-    using Base::hash_function;
-    using Base::key_eq;
-};
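A `parallel_flat_hash_set` is internally split into `2**N` submaps (N = 4, i.e. 16 submaps, by default); `subidx()` routes each hash value to one submap and `subcnt()` reports the shard count. A sketch:

```
#include <cstdio>
#include "parallel_hashmap/phmap.h"

int main() {
    phmap::parallel_flat_hash_set<int> s;   // default N = 4, i.e. 16 submaps
    for (int i = 0; i < 1000; ++i) s.insert(i);
    std::printf("submaps=%zu size=%zu\n", s.subcnt(), s.size());
}
```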
-// -----------------------------------------------------------------------------
-// phmap::parallel_flat_hash_map - default values in phmap_fwd_decl.h
-// -----------------------------------------------------------------------------
-template <class K, class V, class Hash, class Eq, class Alloc, size_t N, class Mtx_>
-class parallel_flat_hash_map : public phmap::priv::parallel_hash_map<
-                                   N, phmap::priv::raw_hash_set, Mtx_,
-                                   phmap::priv::FlatHashMapPolicy<K, V>,
-                                   Hash, Eq, Alloc>
-{
-    using Base = typename parallel_flat_hash_map::parallel_hash_map;
-
-public:
-    parallel_flat_hash_map() {}
-#ifdef __INTEL_COMPILER
-    using Base::parallel_hash_map;
-#else
-    using Base::Base;
-#endif
-    using Base::hash;
-    using Base::subidx;
-    using Base::subcnt;
-    using Base::begin;
-    using Base::cbegin;
-    using Base::cend;
-    using Base::end;
-    using Base::capacity;
-    using Base::empty;
-    using Base::max_size;
-    using Base::size;
-    using Base::clear;
-    using Base::erase;
-    using Base::insert;
-    using Base::insert_or_assign;
-    using Base::emplace;
-    using Base::emplace_hint;
-    using Base::try_emplace;
-    using Base::emplace_with_hash;
-    using Base::emplace_hint_with_hash;
-    using Base::try_emplace_with_hash;
-    using Base::extract;
-    using Base::merge;
-    using Base::swap;
-    using Base::rehash;
-    using Base::reserve;
-    using Base::at;
-    using Base::contains;
-    using Base::count;
-    using Base::equal_range;
-    using Base::find;
-    using Base::operator[];
-    using Base::bucket_count;
-    using Base::load_factor;
-    using Base::max_load_factor;
-    using Base::get_allocator;
-    using Base::hash_function;
-    using Base::key_eq;
-};
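The default mutex type for the parallel containers adds no locking; supplying a real mutex as the trailing `Mtx_` argument makes each submap independently lockable, so concurrent writers mostly contend on different submaps. The defaulted hash/eq/allocator arguments are spelled out below only because `Mtx_` comes last, and `std::allocator` is used for brevity; operations that return iterators or references are still not safe to use concurrently, so the sketch sticks to `insert()`:

```
#include <cstdio>
#include <memory>
#include <mutex>
#include <thread>
#include <utility>
#include "parallel_hashmap/phmap.h"

int main() {
    using Map = phmap::parallel_flat_hash_map<
        int, int,
        phmap::priv::hash_default_hash<int>,
        phmap::priv::hash_default_eq<int>,
        std::allocator<std::pair<const int, int>>,
        4, std::mutex>;                  // 2**4 = 16 independently locked submaps
    Map m;
    std::thread t1([&m] { for (int i = 0;    i < 1000; ++i) m.insert({i, i}); });
    std::thread t2([&m] { for (int i = 1000; i < 2000; ++i) m.insert({i, i}); });
    t1.join(); t2.join();
    std::printf("size=%zu\n", m.size());
}
```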
-// -----------------------------------------------------------------------------
-// phmap::parallel_node_hash_set
-// -----------------------------------------------------------------------------
-template <class T, class Hash, class Eq, class Alloc, size_t N, class Mtx_>
-class parallel_node_hash_set
-    : public phmap::priv::parallel_hash_set<
-          N, phmap::priv::raw_hash_set, Mtx_,
-          phmap::priv::NodeHashSetPolicy<T>, Hash, Eq, Alloc>
-{
-    using Base = typename parallel_node_hash_set::parallel_hash_set;
-
-public:
-    parallel_node_hash_set() {}
-#ifdef __INTEL_COMPILER
-    using Base::parallel_hash_set;
-#else
-    using Base::Base;
-#endif
-    using Base::hash;
-    using Base::subidx;
-    using Base::subcnt;
-    using Base::begin;
-    using Base::cbegin;
-    using Base::cend;
-    using Base::end;
-    using Base::capacity;
-    using Base::empty;
-    using Base::max_size;
-    using Base::size;
-    using Base::clear;
-    using Base::erase;
-    using Base::insert;
-    using Base::emplace;
-    using Base::emplace_hint;
-    using Base::emplace_with_hash;
-    using Base::emplace_hint_with_hash;
-    using Base::extract;
-    using Base::merge;
-    using Base::swap;
-    using Base::rehash;
-    using Base::reserve;
-    using Base::contains;
-    using Base::count;
-    using Base::equal_range;
-    using Base::find;
-    using Base::bucket_count;
-    using Base::load_factor;
-    using Base::max_load_factor;
-    using Base::get_allocator;
-    using Base::hash_function;
-    using Base::key_eq;
-    typename Base::hasher hash_funct() { return this->hash_function(); }
-    void resize(typename Base::size_type hint) { this->rehash(hint); }
-};
-
-// -----------------------------------------------------------------------------
-// phmap::parallel_node_hash_map
-// -----------------------------------------------------------------------------
-template <class Key, class Value, class Hash, class Eq, class Alloc, size_t N, class Mtx_>
-class parallel_node_hash_map
-    : public phmap::priv::parallel_hash_map<
-          N, phmap::priv::raw_hash_set, Mtx_,
-          phmap::priv::NodeHashMapPolicy<Key, Value>, Hash, Eq,
-          Alloc>
-{
-    using Base = typename parallel_node_hash_map::parallel_hash_map;
-
-public:
-    parallel_node_hash_map() {}
-#ifdef __INTEL_COMPILER
-    using Base::parallel_hash_map;
-#else
-    using Base::Base;
-#endif
-    using Base::hash;
-    using Base::subidx;
-    using Base::subcnt;
-    using Base::begin;
-    using Base::cbegin;
-    using Base::cend;
-    using Base::end;
-    using Base::capacity;
-    using Base::empty;
-    using Base::max_size;
-    using Base::size;
-    using Base::clear;
-    using Base::erase;
-    using Base::insert;
-    using Base::insert_or_assign;
-    using Base::emplace;
-    using Base::emplace_hint;
-    using Base::try_emplace;
-    using Base::emplace_with_hash;
-    using Base::emplace_hint_with_hash;
-    using Base::try_emplace_with_hash;
-    using Base::extract;
-    using Base::merge;
-    using Base::swap;
-    using Base::rehash;
-    using Base::reserve;
-    using Base::at;
-    using Base::contains;
-    using Base::count;
-    using Base::equal_range;
-    using Base::find;
-    using Base::operator[];
-    using Base::bucket_count;
-    using Base::load_factor;
-    using Base::max_load_factor;
-    using Base::get_allocator;
-    using Base::hash_function;
-    using Base::key_eq;
-    typename Base::hasher hash_funct() { return this->hash_function(); }
-    void resize(typename Base::size_type hint) { this->rehash(hint); }
-};
-
-} // namespace phmap
-
-
-namespace phmap {
-    namespace priv {
-        template <class C, class Pred>
-        std::size_t erase_if(C& c, Pred pred) {
-            auto old_size = c.size();
-            for (auto i = c.begin(), last = c.end(); i != last; ) {
-                if (pred(*i)) {
-                    i = c.erase(i);
-                } else {
-                    ++i;
-                }
-            }
-            return old_size - c.size();
-        }
-    } // priv
-
-    // ======== erase_if for phmap set containers ==================================
-    template <class T, class Hash, class Eq, class Alloc, class Pred>
-    std::size_t erase_if(phmap::flat_hash_set<T, Hash, Eq, Alloc>& c, Pred pred) {
-        return phmap::priv::erase_if(c, std::move(pred));
-    }
-
-    template <class T, class Hash, class Eq, class Alloc, class Pred>
-    std::size_t erase_if(phmap::node_hash_set<T, Hash, Eq, Alloc>& c, Pred pred) {
-        return phmap::priv::erase_if(c, std::move(pred));
-    }
-
-    template <class T, class Hash, class Eq, class Alloc, size_t N, class Mtx_, class Pred>
-    std::size_t erase_if(phmap::parallel_flat_hash_set<T, Hash, Eq, Alloc, N, Mtx_>& c, Pred pred) {
-        return phmap::priv::erase_if(c, std::move(pred));
-    }
-
-    template <class T, class Hash, class Eq, class Alloc, size_t N, class Mtx_, class Pred>
-    std::size_t erase_if(phmap::parallel_node_hash_set<T, Hash, Eq, Alloc, N, Mtx_>& c, Pred pred) {
-        return phmap::priv::erase_if(c, std::move(pred));
-    }
-
-    // ======== erase_if for phmap map containers ==================================
-    template <class K, class V, class Hash, class Eq, class Alloc, class Pred>
-    std::size_t erase_if(phmap::flat_hash_map<K, V, Hash, Eq, Alloc>& c, Pred pred) {
-        return phmap::priv::erase_if(c, std::move(pred));
-    }
-
-    template <class K, class V, class Hash, class Eq, class Alloc, class Pred>
-    std::size_t erase_if(phmap::node_hash_map<K, V, Hash, Eq, Alloc>& c, Pred pred) {
-        return phmap::priv::erase_if(c, std::move(pred));
-    }
-
-    template <class K, class V, class Hash, class Eq, class Alloc, size_t N, class Mtx_, class Pred>
-    std::size_t erase_if(phmap::parallel_flat_hash_map<K, V, Hash, Eq, Alloc, N, Mtx_>& c, Pred pred) {
-        return phmap::priv::erase_if(c, std::move(pred));
-    }
-
-    template <class K, class V, class Hash, class Eq, class Alloc, size_t N, class Mtx_, class Pred>
-    std::size_t erase_if(phmap::parallel_node_hash_map<K, V, Hash, Eq, Alloc, N, Mtx_>& c, Pred pred) {
-        return phmap::priv::erase_if(c, std::move(pred));
-    }
-
-} // phmap
-
-#ifdef _MSC_VER
-    #pragma warning(pop)
-#endif
-
-
-#endif // phmap_h_guard_
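The `erase_if` overloads above all forward to `priv::erase_if`, which walks the container once and erases matching elements through the iterator-returning `erase`. A sketch:

```
#include <cstdio>
#include "parallel_hashmap/phmap.h"

int main() {
    phmap::flat_hash_map<int, int> m = {{1, 10}, {2, 20}, {3, 30}, {4, 40}};
    // remove entries with odd keys; the return value is the number of erased elements
    std::size_t n = phmap::erase_if(m, [](const auto& kv) { return kv.first % 2 != 0; });
    std::printf("erased %zu, remaining %zu\n", n, m.size());
}
```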
diff --git a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/parallel_hashmap/phmap_base.h b/pytorch_sparse-0.6.17/third_party/parallel-hashmap/parallel_hashmap/phmap_base.h
deleted file mode 100644
index 2a2f380..0000000
--- a/pytorch_sparse-0.6.17/third_party/parallel-hashmap/parallel_hashmap/phmap_base.h
+++ /dev/null
@@ -1,5164 +0,0 @@
-#if !defined(phmap_base_h_guard_)
-#define phmap_base_h_guard_
-
-// ---------------------------------------------------------------------------
-// Copyright (c) 2019, Gregory Popovitch - greg7mdp@gmail.com
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// Includes work from abseil-cpp (https://github.com/abseil/abseil-cpp)
-// with modifications.
-//
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// ---------------------------------------------------------------------------
-
-#include <algorithm>
-#include <cassert>
-#include <cstddef>
-#include <cstring>
-#include <initializer_list>
-#include <iterator>
-#include <memory>
-#include <string>
-#include <type_traits>
-#include <utility>
-#include <functional>
-#include <tuple>
-#include <mutex>  // for std::lock
-
-#include "phmap_config.h"
-
-#ifdef PHMAP_HAVE_SHARED_MUTEX
-    #include <shared_mutex>  // after "phmap_config.h"
-#endif
-
-#ifdef _MSC_VER
-    #pragma warning(push)
-    #pragma warning(disable : 4514) // unreferenced inline function has been removed
-    #pragma warning(disable : 4582) // constructor is not implicitly called
-    #pragma warning(disable : 4625) // copy constructor was implicitly defined as deleted
-    #pragma warning(disable : 4626) // assignment operator was implicitly defined as deleted
-    #pragma warning(disable : 4710) // function not inlined
-    #pragma warning(disable : 4711) // selected for automatic inline expansion
-    #pragma warning(disable : 4820) // '6' bytes padding added after data member
-#endif  // _MSC_VER
-
-namespace phmap {
-
-template <class T> using Allocator = typename std::allocator<T>;
-
-template <class T1, class T2> using Pair = typename std::pair<T1, T2>;
-
-template <class T>
-struct EqualTo
-{
-    inline bool operator()(const T& a, const T& b) const
-    {
-        return std::equal_to<T>()(a, b);
-    }
-};
-
-template <class T>
-struct Less
-{
-    inline bool operator()(const T& a, const T& b) const
-    {
-        return std::less<T>()(a, b);
-    }
-};
-
-namespace type_traits_internal {
-
-template <typename... Ts>
-struct VoidTImpl {
-    using type = void;
-};
-
-// This trick to retrieve a default alignment is necessary for our
-// implementation of aligned_storage_t to be consistent with any implementation
-// of std::aligned_storage.
-// ---------------------------------------------------------------------------
-template <size_t Len, typename T = std::aligned_storage<Len>>
-struct default_alignment_of_aligned_storage;
-
-template <size_t Len, size_t Align>
-struct default_alignment_of_aligned_storage<Len, std::aligned_storage<Len, Align>> {
-    static constexpr size_t value = Align;
-};
-
-// NOTE: The `is_detected` family of templates here differ from the library
-// fundamentals specification in that for library fundamentals, `Op<Args...>` is
-// evaluated as soon as the type `is_detected<Op, Args...>` undergoes
-// substitution, regardless of whether or not the `::value` is accessed. That
-// is inconsistent with all other standard traits and prevents lazy evaluation
-// in larger contexts (such as if the `is_detected` check is a trailing argument
-// of a `conjunction`). This implementation opts to instead be lazy in the same
-// way that the standard traits are (this "defect" of the detection idiom
-// specifications has been reported).
-// ---------------------------------------------------------------------------
-
-template <class Enabler, template <class...> class Op, class... Args>
-struct is_detected_impl {
-    using type = std::false_type;
-};
-
-template