diff --git a/docs/reference/beignet.datasets.md b/docs/reference/beignet.datasets.md new file mode 100644 index 0000000000..19ea16585e --- /dev/null +++ b/docs/reference/beignet.datasets.md @@ -0,0 +1,80 @@ +::: beignet.datasets.ATOM3DDataset +::: beignet.datasets.ATOM3DMSPDataset +::: beignet.datasets.ATOM3DPPIDataset +::: beignet.datasets.ATOM3DPSRDataset +::: beignet.datasets.ATOM3DRESDataset +::: beignet.datasets.ATOM3DRSRDataset +::: beignet.datasets.ATOM3DSMPDataset +::: beignet.datasets.AqSolDBSolubilityDataset +::: beignet.datasets.AstraZenecaClearanceDataset +::: beignet.datasets.AstraZenecaLipophilicityDataset +::: beignet.datasets.AstraZenecaPlasmaProteinBindingRateDataset +::: beignet.datasets.BindingDBIC50Dataset +::: beignet.datasets.BindingDBKdDataset +::: beignet.datasets.BindingDBKiDataset +::: beignet.datasets.BroccatelliPGlycoproteinInhibitionDataset +::: beignet.datasets.CarbonMangelsCytochromeP4502C9SubstrateDataset +::: beignet.datasets.CarbonMangelsCytochromeP4502D6SubstrateDataset +::: beignet.datasets.CarbonMangelsCytochromeP4503A4SubstrateDataset +::: beignet.datasets.ChEMBLDataset +::: beignet.datasets.ClinToxDataset +::: beignet.datasets.DAVISDataset +::: beignet.datasets.DataFrameDataset +::: beignet.datasets.DisGeNETDataset +::: beignet.datasets.DrugCombDataset +::: beignet.datasets.FASTADataset +::: beignet.datasets.FreeSolvDataset +::: beignet.datasets.GDSC1Dataset +::: beignet.datasets.GDSC2Dataset +::: beignet.datasets.HDF5TrajectoryDataset +::: beignet.datasets.HouHumanIntestinalAbsorptionDataset +::: beignet.datasets.HuRIDataset +::: beignet.datasets.JespersenIEDBDataset +::: beignet.datasets.JespersenPDBDataset +::: beignet.datasets.KIBADataset +::: beignet.datasets.LMDBDataset +::: beignet.datasets.LombardoVolumeOfDistributionAtSteadyStateDataset +::: beignet.datasets.MOSESDataset +::: beignet.datasets.MaBioavailabilityDataset +::: beignet.datasets.MartinsBloodBrainBarrierDataset +::: 
beignet.datasets.NCATSPAMPAPermeabilityDataset +::: beignet.datasets.ObachHalfLifeDataset +::: beignet.datasets.PDB70Dataset +::: beignet.datasets.PDBTrajectoryDataset +::: beignet.datasets.PDBbindDataset +::: beignet.datasets.ParquetDataset +::: beignet.datasets.QM7Dataset +::: beignet.datasets.QM7bDataset +::: beignet.datasets.QM8Dataset +::: beignet.datasets.QM9Dataset +::: beignet.datasets.REALDatabaseDataset +::: beignet.datasets.RandomEulerAngleDataset +::: beignet.datasets.RandomQuaternionDataset +::: beignet.datasets.RandomRotationDataset +::: beignet.datasets.RandomRotationMatrixDataset +::: beignet.datasets.RandomRotationVectorDataset +::: beignet.datasets.SAbDabDataset +::: beignet.datasets.SKEMPIDataset +::: beignet.datasets.SequenceDataset +::: beignet.datasets.SizedSequenceDataset +::: beignet.datasets.SwissProtDataset +::: beignet.datasets.TDCDataset +::: beignet.datasets.TherapeuticAntibodyProfilerDataset +::: beignet.datasets.Tox21Dataset +::: beignet.datasets.ToxCastDataset +::: beignet.datasets.TrEMBLDataset +::: beignet.datasets.TrajectoryDataset +::: beignet.datasets.USPTODataset +::: beignet.datasets.USPTOReactionProductDataset +::: beignet.datasets.UniProtDataset +::: beignet.datasets.UniRef100Dataset +::: beignet.datasets.UniRef50Dataset +::: beignet.datasets.UniRef90Dataset +::: beignet.datasets.VeithCytochromeP4501A2InhibitionDataset +::: beignet.datasets.VeithCytochromeP4502C19InhibitionDataset +::: beignet.datasets.VeithCytochromeP4502C9InhibitionDataset +::: beignet.datasets.VeithCytochromeP4502D6InhibitionDataset +::: beignet.datasets.VeithCytochromeP4503A4InhibitionDataset +::: beignet.datasets.WangEffectivePermeabilityDataset +::: beignet.datasets.ZINCDataset +::: beignet.datasets.ZhuAcuteToxicityLD50Dataset diff --git a/docs/reference/beignet.io.md b/docs/reference/beignet.io.md index b9cb2f6c2b..05d69e0d1e 100644 --- a/docs/reference/beignet.io.md +++ b/docs/reference/beignet.io.md @@ -1,3 +1 @@ -# beignet.io - ::: 
beignet.io.ThreadSafeFile diff --git a/docs/reference/datasets/antibody-affinity.md b/docs/reference/datasets/antibody-affinity.md new file mode 100644 index 0000000000..5874b0e4dd --- /dev/null +++ b/docs/reference/datasets/antibody-affinity.md @@ -0,0 +1 @@ +::: beignet.datasets.SAbDabDataset diff --git a/docs/reference/datasets/catalyst.md b/docs/reference/datasets/catalyst.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/docs/reference/datasets/datasets.md b/docs/reference/datasets/datasets.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/docs/reference/datasets/drug-interaction.md b/docs/reference/datasets/drug-interaction.md new file mode 100644 index 0000000000..4757c73db5 --- /dev/null +++ b/docs/reference/datasets/drug-interaction.md @@ -0,0 +1,3 @@ +::: beignet.datasets.BindingDBKdDataset +::: beignet.datasets.DAVISDataset +::: beignet.datasets.KIBADataset diff --git a/docs/reference/datasets/drug-intolerance.md b/docs/reference/datasets/drug-intolerance.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/docs/reference/datasets/epitope.md b/docs/reference/datasets/epitope.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/docs/reference/datasets/gene-disease-association.md b/docs/reference/datasets/gene-disease-association.md new file mode 100644 index 0000000000..238cde7afb --- /dev/null +++ b/docs/reference/datasets/gene-disease-association.md @@ -0,0 +1 @@ +::: beignet.datasets.DisGeNETDataset diff --git a/docs/reference/datasets/general-purpose.md b/docs/reference/datasets/general-purpose.md new file mode 100644 index 0000000000..c625105426 --- /dev/null +++ b/docs/reference/datasets/general-purpose.md @@ -0,0 +1,8 @@ +::: beignet.datasets.ATOM3DDataset +::: beignet.datasets.DataFrameDataset +::: beignet.datasets.FASTADataset +::: beignet.datasets.LMDBDataset +::: beignet.datasets.ParquetDataset +::: beignet.datasets.SequenceDataset +::: beignet.datasets.SizedSequenceDataset +::: 
beignet.datasets.TDCDataset diff --git a/docs/reference/datasets/geometry/transformations.md b/docs/reference/datasets/geometry/transformations.md index 741033fd05..d9dec1b787 100644 --- a/docs/reference/datasets/geometry/transformations.md +++ b/docs/reference/datasets/geometry/transformations.md @@ -1,3 +1,4 @@ +::: beignet.datasets.RandomRotationDataset ::: beignet.datasets.RandomEulerAngleDataset ::: beignet.datasets.RandomQuaternionDataset ::: beignet.datasets.RandomRotationMatrixDataset diff --git a/docs/reference/datasets/molecule.md b/docs/reference/datasets/molecule.md new file mode 100644 index 0000000000..37e7e09efa --- /dev/null +++ b/docs/reference/datasets/molecule.md @@ -0,0 +1,3 @@ +::: beignet.datasets.ChEMBLDataset +::: beignet.datasets.MOSESDataset +::: beignet.datasets.ZINCDataset diff --git a/docs/reference/datasets/pharmacokinetic-property/absorption.md b/docs/reference/datasets/pharmacokinetic-property/absorption.md new file mode 100644 index 0000000000..afc9127968 --- /dev/null +++ b/docs/reference/datasets/pharmacokinetic-property/absorption.md @@ -0,0 +1,7 @@ +::: beignet.datasets.AqSolDBSolubilityDataset +::: beignet.datasets.AstraZenecaLipophilicityDataset +::: beignet.datasets.BroccatelliPGlycoproteinInhibitionDataset +::: beignet.datasets.HouHumanIntestinalAbsorptionDataset +::: beignet.datasets.MaBioavailabilityDataset +::: beignet.datasets.NCATSPAMPAPermeabilityDataset +::: beignet.datasets.WangEffectivePermeabilityDataset diff --git a/docs/reference/datasets/pharmacokinetic-property/distribution.md b/docs/reference/datasets/pharmacokinetic-property/distribution.md new file mode 100644 index 0000000000..a0ae0777cd --- /dev/null +++ b/docs/reference/datasets/pharmacokinetic-property/distribution.md @@ -0,0 +1,3 @@ +::: beignet.datasets.AstraZenecaPlasmaProteinBindingRateDataset +::: beignet.datasets.LombardoVolumeOfDistributionAtSteadyStateDataset +::: beignet.datasets.MartinsBloodBrainBarrierDataset diff --git 
a/docs/reference/datasets/pharmacokinetic-property/excretion.md b/docs/reference/datasets/pharmacokinetic-property/excretion.md new file mode 100644 index 0000000000..1f915e434b --- /dev/null +++ b/docs/reference/datasets/pharmacokinetic-property/excretion.md @@ -0,0 +1,2 @@ +::: beignet.datasets.AstraZenecaClearanceDataset +::: beignet.datasets.ObachHalfLifeDataset diff --git a/docs/reference/datasets/pharmacokinetic-property/metabolism.md b/docs/reference/datasets/pharmacokinetic-property/metabolism.md new file mode 100644 index 0000000000..fbd458059f --- /dev/null +++ b/docs/reference/datasets/pharmacokinetic-property/metabolism.md @@ -0,0 +1,8 @@ +::: beignet.datasets.CarbonMangelsCytochromeP4502C9SubstrateDataset +::: beignet.datasets.CarbonMangelsCytochromeP4502D6SubstrateDataset +::: beignet.datasets.CarbonMangelsCytochromeP4503A4SubstrateDataset +::: beignet.datasets.VeithCytochromeP4501A2InhibitionDataset +::: beignet.datasets.VeithCytochromeP4502C19InhibitionDataset +::: beignet.datasets.VeithCytochromeP4502C9InhibitionDataset +::: beignet.datasets.VeithCytochromeP4502D6InhibitionDataset +::: beignet.datasets.VeithCytochromeP4503A4InhibitionDataset \ No newline at end of file diff --git a/docs/reference/datasets/protein-protein-interaction.md b/docs/reference/datasets/protein-protein-interaction.md new file mode 100644 index 0000000000..b1d3fc4942 --- /dev/null +++ b/docs/reference/datasets/protein-protein-interaction.md @@ -0,0 +1 @@ +::: beignet.datasets.HuRIDataset diff --git a/docs/reference/datasets/protein/protein-sequence.md b/docs/reference/datasets/protein/protein-sequence.md new file mode 100644 index 0000000000..4c108f1b38 --- /dev/null +++ b/docs/reference/datasets/protein/protein-sequence.md @@ -0,0 +1,6 @@ +::: beignet.datasets.UniProtDataset +::: beignet.datasets.UniRef50Dataset +::: beignet.datasets.UniRef90Dataset +::: beignet.datasets.UniRef100Dataset +::: beignet.datasets.SwissProtDataset +::: beignet.datasets.TrEMBLDataset diff --git 
a/docs/reference/datasets/protein/protein-structure.md b/docs/reference/datasets/protein/protein-structure.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/docs/reference/datasets/quantum-mechanics.md b/docs/reference/datasets/quantum-mechanics.md new file mode 100644 index 0000000000..772b4f7d9e --- /dev/null +++ b/docs/reference/datasets/quantum-mechanics.md @@ -0,0 +1,4 @@ +::: beignet.datasets.QM7Dataset +::: beignet.datasets.QM7bDataset +::: beignet.datasets.QM8Dataset +::: beignet.datasets.QM9Dataset \ No newline at end of file diff --git a/docs/reference/datasets/reaction.md b/docs/reference/datasets/reaction.md new file mode 100644 index 0000000000..38be59cc33 --- /dev/null +++ b/docs/reference/datasets/reaction.md @@ -0,0 +1,2 @@ +::: beignet.datasets.USPTOReactionProductDataset +::: beignet.datasets.USPTODataset diff --git a/docs/reference/datasets/retrosynthesis.md b/docs/reference/datasets/retrosynthesis.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/docs/reference/datasets/sequences.md b/docs/reference/datasets/sequences.md index 8566583fc2..a9865cdd66 100644 --- a/docs/reference/datasets/sequences.md +++ b/docs/reference/datasets/sequences.md @@ -1,3 +1,14 @@ -::: beignet.datasets.FASTADataset -::: beignet.datasets.SequenceDataset -::: beignet.datasets.SizedSequenceDataset +::: beignet.datasets.ATOM3DMSPDataset +::: beignet.datasets.ATOM3DPPIDataset +::: beignet.datasets.ATOM3DPSRDataset +::: beignet.datasets.ATOM3DRESDataset +::: beignet.datasets.ATOM3DRSRDataset +::: beignet.datasets.ATOM3DSMPDataset +::: beignet.datasets.DrugCombDataset +::: beignet.datasets.FreeSolvDataset +::: beignet.datasets.GDSC1Dataset +::: beignet.datasets.GDSC2Dataset +::: beignet.datasets.PDB70Dataset +::: beignet.datasets.PDBbindDataset +::: beignet.datasets.REALDatabaseDataset +::: beignet.datasets.SKEMPIDataset diff --git a/docs/reference/datasets/toxicity.md b/docs/reference/datasets/toxicity.md new file mode 100644 index 
0000000000..ee0670d467 --- /dev/null +++ b/docs/reference/datasets/toxicity.md @@ -0,0 +1,4 @@ +::: beignet.datasets.ClinToxDataset +::: beignet.datasets.Tox21Dataset +::: beignet.datasets.ToxCastDataset +::: beignet.datasets.ZhuAcuteToxicityLD50Dataset diff --git a/mkdocs.yml b/mkdocs.yml index 6d72499eec..6e0d7a6ae2 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -34,19 +34,15 @@ nav: - "Probabilists’ Hermite polynomial": "reference/operators/special-functions/orthogonal-polynomials/probabilists-hermite-polynomial.md" - "Laguerre polynomial": "reference/operators/special-functions/orthogonal-polynomials/laguerre-polynomial.md" - "Legendre polynomial": "reference/operators/special-functions/orthogonal-polynomials/legendre-polynomial.md" - - "Thermodynamics": "reference/reference/operators/thermodynamics.md" - - "beignet.datasets": - - "Geometry": - - "Transformations": "reference/datasets/geometry/transformations.md" - - "Protein sequences": "reference/datasets/protein-sequences.md" - - "Sequences": "reference/datasets/sequences.md" + - "Thermodynamics": "reference/operators/thermodynamics.md" + - "beignet.datasets": "reference/beignet.datasets.md" - "beignet.features": - "General-purpose": "reference/features/general-purpose.md" - "Geometry": - "Transformations": "reference/features/geometry/transformations.md" - "beignet.func": - "Molecular dynamics": "reference/func/molecular-dynamics.md" - - "beignet.io": "reference/operators.io.md" + - "beignet.io": "reference/beignet.io.md" - "beignet.transforms": - "General-purpose": "reference/transforms/general-purpose.md" plugins: diff --git a/pyproject.toml b/pyproject.toml index 678fd9ec83..a7ae829607 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -22,7 +22,13 @@ requires-python = ">=3.10" [project.optional-dependencies] all = [ - "beignet[docs,mdtraj,test]", + "beignet[datasets,docs,mdtraj,test]", +] +datasets = [ + "biopython", + "lmdb", + "pandas", + "pooch", ] docs = [ "mkdocs-material", diff --git 
a/src/beignet/datasets/__init__.py b/src/beignet/datasets/__init__.py index e7e23958db..d613dc1b27 100644 --- a/src/beignet/datasets/__init__.py +++ b/src/beignet/datasets/__init__.py @@ -1,35 +1,185 @@ +from ._aqsoldb_solubility_dataset import AqSolDBSolubilityDataset +from ._astrazeneca_clearance_dataset import AstraZenecaClearanceDataset +from ._astrazeneca_lipophilicity_dataset import AstraZenecaLipophilicityDataset +from ._astrazeneca_plasma_protein_binding_rate_dataset import ( + AstraZenecaPlasmaProteinBindingRateDataset, +) +from ._atom3d_dataset import ATOM3DDataset +from ._atom3d_msp_dataset import ATOM3DMSPDataset +from ._atom3d_ppi_dataset import ATOM3DPPIDataset +from ._atom3d_psr_dataset import ATOM3DPSRDataset +from ._atom3d_res_dataset import ATOM3DRESDataset +from ._atom3d_rsr_dataset import ATOM3DRSRDataset +from ._atom3d_smp_dataset import ATOM3DSMPDataset +from ._bindingdb_ic50_dataset import BindingDBIC50Dataset +from ._bindingdb_kd_dataset import BindingDBKdDataset +from ._bindingdb_ki_dataset import BindingDBKiDataset +from ._broccatelli_p_glycoprotein_inhibition_dataset import ( + BroccatelliPGlycoproteinInhibitionDataset, +) +from ._carbon_mangels_cytochrome_p450_2c9_substrate_dataset import ( + CarbonMangelsCytochromeP4502C9SubstrateDataset, +) +from ._carbon_mangels_cytochrome_p450_2d6_substrate_dataset import ( + CarbonMangelsCytochromeP4502D6SubstrateDataset, +) +from ._carbon_mangels_cytochrome_p450_3a4_substrate_dataset import ( + CarbonMangelsCytochromeP4503A4SubstrateDataset, +) +from ._chembl_dataset import ChEMBLDataset +from ._clintox_dataset import ClinToxDataset +from ._data_frame_dataset import DataFrameDataset +from ._davis_dataset import DAVISDataset +from ._disgenet_dataset import DisGeNETDataset +from ._drugcomb_dataset import DrugCombDataset from ._fasta_dataset import FASTADataset +from ._freesolv_dataset import FreeSolvDataset +from ._gdsc1_dataset import GDSC1Dataset +from ._gdsc2_dataset import GDSC2Dataset from 
._hdf5_trajectory_dataset import HDF5TrajectoryDataset +from ._hou_human_intestinal_absorption_dataset import ( + HouHumanIntestinalAbsorptionDataset, +) +from ._huri_dataset import HuRIDataset +from ._jespersen_iedb_dataset import JespersenIEDBDataset +from ._jespersen_pdb_dataset import JespersenPDBDataset +from ._kiba_dataset import KIBADataset +from ._lmdb_dataset import LMDBDataset +from ._lombardo_volume_of_distribution_at_steady_state_dataset import ( + LombardoVolumeOfDistributionAtSteadyStateDataset, +) +from ._ma_bioavailability_dataset import MaBioavailabilityDataset +from ._martins_blood_brain_barrier_dataset import MartinsBloodBrainBarrierDataset +from ._moses_dataset import MOSESDataset +from ._ncats_pampa_permeability_dataset import NCATSPAMPAPermeabilityDataset +from ._obach_half_life_dataset import ObachHalfLifeDataset +from ._parquet_dataset import ParquetDataset from ._pdb_trajectory_dataset import PDBTrajectoryDataset +from ._pdbbind_dataset import PDBbindDataset +from ._qm7_dataset import QM7Dataset +from ._qm7b_dataset import QM7bDataset +from ._qm8_dataset import QM8Dataset +from ._qm9_dataset import QM9Dataset from ._random_euler_angle_dataset import RandomEulerAngleDataset from ._random_quaternion_dataset import RandomQuaternionDataset +from ._random_rotation_dataset import RandomRotationDataset from ._random_rotation_matrix_dataset import RandomRotationMatrixDataset from ._random_rotation_vector_dataset import RandomRotationVectorDataset +from ._real_database_dataset import REALDatabaseDataset +from ._sabdab_dataset import SAbDabDataset from ._sequence_dataset import SequenceDataset from ._sized_sequence_dataset import SizedSequenceDataset +from ._skempi_dataset import SKEMPIDataset from ._swissprot_dataset import SwissProtDataset +from ._tdc_dataset import TDCDataset +from ._therapeutic_antibody_profiler_dataset import TherapeuticAntibodyProfilerDataset +from ._tox21_dataset import Tox21Dataset +from ._toxcast_dataset import 
ToxCastDataset from ._trajectory_dataset import TrajectoryDataset from ._trembl_dataset import TrEMBLDataset from ._uniprot_dataset import UniProtDataset from ._uniref50_dataset import UniRef50Dataset from ._uniref90_dataset import UniRef90Dataset from ._uniref100_dataset import UniRef100Dataset +from ._uspto_dataset import USPTODataset +from ._uspto_reaction_product_dataset import USPTOReactionProductDataset +from ._veith_cytochrome_p450_1a2_inhibition_dataset import ( + VeithCytochromeP4501A2InhibitionDataset, +) +from ._veith_cytochrome_p450_2c9_inhibition_dataset import ( + VeithCytochromeP4502C9InhibitionDataset, +) +from ._veith_cytochrome_p450_2c19_inhibition_dataset import ( + VeithCytochromeP4502C19InhibitionDataset, +) +from ._veith_cytochrome_p450_2d6_inhibition_dataset import ( + VeithCytochromeP4502D6InhibitionDataset, +) +from ._veith_cytochrome_p450_3a4_inhibition_dataset import ( + VeithCytochromeP4503A4InhibitionDataset, +) +from ._wang_effective_permeability_dataset import WangEffectivePermeabilityDataset +from ._zhu_acute_toxicity_ld50_dataset import ZhuAcuteToxicityLD50Dataset +from ._zinc_dataset import ZINCDataset __all__ = [ + "ATOM3DDataset", + "ATOM3DMSPDataset", + "ATOM3DPPIDataset", + "ATOM3DPSRDataset", + "ATOM3DRESDataset", + "ATOM3DRSRDataset", + "ATOM3DSMPDataset", + "AqSolDBSolubilityDataset", + "AstraZenecaClearanceDataset", + "AstraZenecaLipophilicityDataset", + "AstraZenecaPlasmaProteinBindingRateDataset", + "BindingDBIC50Dataset", + "BindingDBKdDataset", + "BindingDBKiDataset", + "BroccatelliPGlycoproteinInhibitionDataset", + "CarbonMangelsCytochromeP4502C9SubstrateDataset", + "CarbonMangelsCytochromeP4502D6SubstrateDataset", + "CarbonMangelsCytochromeP4503A4SubstrateDataset", + "ChEMBLDataset", + "ClinToxDataset", + "DAVISDataset", + "DataFrameDataset", + "DisGeNETDataset", + "DrugCombDataset", "FASTADataset", + "FreeSolvDataset", + "GDSC1Dataset", + "GDSC2Dataset", "HDF5TrajectoryDataset", + 
"HouHumanIntestinalAbsorptionDataset", + "HuRIDataset", + "JespersenIEDBDataset", + "JespersenPDBDataset", + "KIBADataset", + "LMDBDataset", + "LombardoVolumeOfDistributionAtSteadyStateDataset", + "MOSESDataset", + "MaBioavailabilityDataset", + "MartinsBloodBrainBarrierDataset", + "NCATSPAMPAPermeabilityDataset", + "ObachHalfLifeDataset", "PDBTrajectoryDataset", + "PDBbindDataset", + "ParquetDataset", + "QM7Dataset", + "QM7bDataset", + "QM8Dataset", + "QM9Dataset", + "REALDatabaseDataset", "RandomEulerAngleDataset", "RandomQuaternionDataset", + "RandomRotationDataset", "RandomRotationMatrixDataset", "RandomRotationVectorDataset", + "SAbDabDataset", + "SKEMPIDataset", "SequenceDataset", "SizedSequenceDataset", "SwissProtDataset", + "TDCDataset", + "TherapeuticAntibodyProfilerDataset", + "Tox21Dataset", + "ToxCastDataset", "TrEMBLDataset", "TrajectoryDataset", + "USPTODataset", + "USPTOReactionProductDataset", "UniProtDataset", "UniRef100Dataset", "UniRef50Dataset", "UniRef90Dataset", + "VeithCytochromeP4501A2InhibitionDataset", + "VeithCytochromeP4502C19InhibitionDataset", + "VeithCytochromeP4502C9InhibitionDataset", + "VeithCytochromeP4502D6InhibitionDataset", + "VeithCytochromeP4503A4InhibitionDataset", + "WangEffectivePermeabilityDataset", + "ZINCDataset", + "ZhuAcuteToxicityLD50Dataset", ] diff --git a/src/beignet/datasets/_aqsoldb_solubility_dataset.py b/src/beignet/datasets/_aqsoldb_solubility_dataset.py new file mode 100644 index 0000000000..16e8bc1481 --- /dev/null +++ b/src/beignet/datasets/_aqsoldb_solubility_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class AqSolDBSolubilityDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + 
Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. + """ + super().__init__( + root=root, + download=download, + identifier=4259610, + suffix="tsv", + checksum="md5:f7a675706bfe7e75c278f16dd2477b03", + x_keys=["Drug"], + y_keys=["Y"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_astrazeneca_clearance_dataset.py b/src/beignet/datasets/_astrazeneca_clearance_dataset.py new file mode 100644 index 0000000000..630d679ad4 --- /dev/null +++ b/src/beignet/datasets/_astrazeneca_clearance_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class AstraZenecaClearanceDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4266187, + suffix="tsv", + checksum="md5:7036ab2a23f6db37843d0ecc072bbddc", + x_keys=["X"], + y_keys=["Y"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_astrazeneca_lipophilicity_dataset.py b/src/beignet/datasets/_astrazeneca_lipophilicity_dataset.py new file mode 100644 index 0000000000..5190aab1a9 --- /dev/null +++ b/src/beignet/datasets/_astrazeneca_lipophilicity_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class AstraZenecaLipophilicityDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4259595, + suffix="tsv", + checksum="md5:77e72d4ec76530271bf4e296b62368ff", + x_keys=["Drug"], + y_keys=["Y"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_astrazeneca_plasma_protein_binding_rate_dataset.py b/src/beignet/datasets/_astrazeneca_plasma_protein_binding_rate_dataset.py new file mode 100644 index 0000000000..eac11e2e10 --- /dev/null +++ b/src/beignet/datasets/_astrazeneca_plasma_protein_binding_rate_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class AstraZenecaPlasmaProteinBindingRateDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=6413140, + suffix="tsv", + checksum="md5:f3b700ea6b1f624fdbcf6a1c67937b00", + x_keys=["Drug", "Species"], + y_keys=["Y"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_atom3d_dataset.py b/src/beignet/datasets/_atom3d_dataset.py new file mode 100644 index 0000000000..ceb30bdcf6 --- /dev/null +++ b/src/beignet/datasets/_atom3d_dataset.py @@ -0,0 +1,39 @@ +from pathlib import Path +from typing import Callable, Optional, Union + +import beignet.io +from beignet.transforms import Transform + +from ._lmdb_dataset import LMDBDataset + + +class ATOM3DDataset(LMDBDataset): + def __init__( + self, + root: Union[str, Path], + path: Union[str, Path], + resource: str, + name: str, + *, + checksum: Optional[str] = None, + download: bool = False, + transform: Union[Callable, Transform, None] = None, + ): + self._root = root + + if isinstance(self._root, str): + self._root = Path(self._root).resolve() + + self._transform_fn = transform + + if download: + beignet.io.download_and_extract_archive( + resource, + self._root / f"ATOM3D{name}", + checksum=checksum, + ) + + super().__init__( + self._root / f"ATOM3D{name}" / path, + transform=transform, + ) diff --git a/src/beignet/datasets/_atom3d_msp_dataset.py b/src/beignet/datasets/_atom3d_msp_dataset.py new file mode 100644 index 0000000000..40d7d313b1 --- /dev/null +++ b/src/beignet/datasets/_atom3d_msp_dataset.py @@ -0,0 +1,53 @@ +from pathlib import Path +from typing import Callable, Tuple, Union + +import torch +from pandas import DataFrame +from torch import Tensor + +from beignet.transforms import Transform + +from ._atom3d_dataset import ATOM3DDataset + + +class ATOM3DMSPDataset(ATOM3DDataset): + def __init__( + self, + root: Union[str, Path], + *, + download: bool = False, + transform: Union[Callable, Transform, None] = None, + target_transform: Union[Callable, Transform, None] = None, + ): + 
super().__init__( + root, + "raw/MSP/data", + "https://zenodo.org/record/4962515/files/MSP-raw.tar.gz", + "MSP", + checksum="77aeb79cfc80bd51cdfb2aa321bf6128", + download=download, + ) + + self._transform_fn = transform + + self._target_transform_fn = target_transform + + def __getitem__( + self, + index: int, + ) -> Tuple[Tuple[DataFrame, DataFrame], Tensor]: + item = super().__getitem__(index) + + structure = DataFrame(**item["original_atoms"]) + + mutant = DataFrame(**item["mutated_atoms"]) + + if self._transform_fn is not None: + structure, mutant = self._transform_fn(structure, mutant) + + target = torch.tensor(int(item["label"])) + + if self._target_transform_fn is not None: + target = self._target_transform_fn(target) + + return (structure, mutant), target diff --git a/src/beignet/datasets/_atom3d_ppi_dataset.py b/src/beignet/datasets/_atom3d_ppi_dataset.py new file mode 100644 index 0000000000..e025be363f --- /dev/null +++ b/src/beignet/datasets/_atom3d_ppi_dataset.py @@ -0,0 +1,52 @@ +from pathlib import Path +from typing import Callable, Tuple, Union + +from pandas import DataFrame + +from beignet.transforms import Transform + +from ._atom3d_dataset import ATOM3DDataset + + +class ATOM3DPPIDataset(ATOM3DDataset): + def __init__( + self, + root: Union[str, Path], + *, + download: bool = False, + transform: Union[Callable, Transform, None] = None, + target_transform: Union[Callable, Transform, None] = None, + joint_transform_fn: Union[Callable, Transform, None] = None, + ): + super().__init__( + root, + "raw/DIPS/data", + "https://zenodo.org/record/4911102/files/PPI-raw.tar.gz", + "PPI", + checksum="621977d132b39957e3480a24a30a7358", + download=download, + ) + + self._transform_fn = transform + + self._target_transform_fn = target_transform + + self._joint_transform_fn = joint_transform_fn + + def __getitem__(self, index: int) -> Tuple[DataFrame, DataFrame]: + item = super().__getitem__(index) + + features = DataFrame(**item["atoms_pairs"]) + + target = 
DataFrame(**item["atoms_neighbors"]) + + if self._joint_transform_fn is not None: + features, target = self._joint_transform_fn(features, target) + + if self._transform_fn is not None: + features = self._transform_fn(features) + + if self._target_transform_fn is not None: + target = self._target_transform_fn(target) + + return features, target diff --git a/src/beignet/datasets/_atom3d_psr_dataset.py b/src/beignet/datasets/_atom3d_psr_dataset.py new file mode 100644 index 0000000000..07b44fef93 --- /dev/null +++ b/src/beignet/datasets/_atom3d_psr_dataset.py @@ -0,0 +1,54 @@ +from pathlib import Path +from typing import Callable, Dict, Tuple, Union + +import torch +from pandas import DataFrame +from torch import Tensor + +from beignet.transforms import Transform + +from ._atom3d_dataset import ATOM3DDataset + + +class ATOM3DPSRDataset(ATOM3DDataset): + def __init__( + self, + root: Union[str, Path], + *, + download: bool = False, + transform: Union[Callable, Transform, None] = None, + target_transform: Union[Callable, Transform, None] = None, + ): + super().__init__( + root, + "raw/casp5_to_13/data", + "https://zenodo.org/record/4915648/files/PSR-raw.tar.gz", + "PSR", + checksum="80caef3c98febb70951fa244c8303039", + download=download, + ) + + self._transform_fn = transform + + self._target_transform_fn = target_transform + + def __getitem__( + self, + index: int, + ) -> Tuple[DataFrame, Dict[str, Tensor]]: + item = super().__getitem__(index) + + features = DataFrame(**item["atoms"]) + + if self._transform_fn is not None: + features = self._transform_fn(features) + + target = item["scores"] + + for k, _ in target.items(): + target[k] = torch.tensor(target[k]) + + if self._target_transform_fn is not None: + target = self._target_transform_fn(target) + + return features, target diff --git a/src/beignet/datasets/_atom3d_res_dataset.py b/src/beignet/datasets/_atom3d_res_dataset.py new file mode 100644 index 0000000000..a4de844337 --- /dev/null +++ 
b/src/beignet/datasets/_atom3d_res_dataset.py @@ -0,0 +1,85 @@ +from pathlib import Path +from typing import Callable, Tuple, Union + +from pandas import DataFrame + +from beignet.transforms import Transform + +from ._atom3d_dataset import ATOM3DDataset + + +class ATOM3DRESDataset(ATOM3DDataset): + def __init__( + self, + root: Union[str, Path], + *, + download: bool = False, + transform: Union[Callable, Transform, None] = None, + target_transform: Union[Callable, Transform, None] = None, + ): + """ + ATOM3D Residue Identity (RES) consists of atomic environments + extracted from non-redundant structures in the Protein Data Bank. + This is formulated as a classification task where the identity of + the amino acid in the center of the environment is predicted based + on all other atoms. + + Each sample is a pair of features and a target, where features is + the molecule’s atomic coordinates and target is the environments’s + atomic coordinates + + Parameters + ---------- + root : Union[str, Path] + The root directory of the dataset. + + download : bool, optional + If True, download the dataset from the specified source, + by default `False`. + + transform : Union[Callable, Transform, None], optional + The transformation function to be applied to the features, + by default `None`. + + target_transform : Union[Callable, Transform, None], optional + The transformation function to be applied to the targets, + by default `None`. + """ + super().__init__( + root, + "raw/RES/data", + "https://zenodo.org/record/5026743/files/RES-raw.tar.gz", + "RES", + checksum="3d6b6c61efb890a8baa303280b6589d9", + download=download, + ) + + self._transform_fn = transform + + self._target_transform_fn = target_transform + + def __getitem__(self, index: int) -> Tuple[DataFrame, DataFrame]: + """ + Parameters + ---------- + index : int + The index of the item to retrieve from the dataset. 
+ + Returns + ------- + Tuple[DataFrame, DataFrame] + A tuple containing the features and target of the item. + """ + item = super().__getitem__(index) + + features = DataFrame(**item["atoms"]) + + if self._transform_fn is not None: + features = self._transform_fn(features) + + target = DataFrame(**item["labels"]) + + if self._target_transform_fn is not None: + target = self._target_transform_fn(target) + + return features, target diff --git a/src/beignet/datasets/_atom3d_rsr_dataset.py b/src/beignet/datasets/_atom3d_rsr_dataset.py new file mode 100644 index 0000000000..39aeb035a5 --- /dev/null +++ b/src/beignet/datasets/_atom3d_rsr_dataset.py @@ -0,0 +1,172 @@ +from pathlib import Path +from typing import Callable, Dict, Tuple, Union + +import torch +from pandas import DataFrame +from torch import Tensor + +from beignet.transforms import Transform + +from ._atom3d_dataset import ATOM3DDataset + + +class ATOM3DRSRDataset(ATOM3DDataset): + def __init__( + self, + root: Union[str, Path], + *, + download: bool = False, + transform: Union[Callable, Transform, None] = None, + target_transform: Union[Callable, Transform, None] = None, + ): + """ + The ATOM3D RNA Structure Ranking (RSR) task predicts the + three-dimensional structure of an RNA molecule, given its sequence. + A total of 21 RNAs are included, which consist of the first 21 RNAs + from the RNA-Puzzles competition (Cruz et al., 2011). + + This problem is phrased as candidate ranking. For each RNA, + candidate structural models are generated using FARFAR2 (“Silly Boy” + Watkins et al., 2020) and each candidate’s atoms’ root + mean squared deviation (RMSD) to the experimentally determined + structure is calculated. + + Each sample is a pair of features and a target, where features is + the molecule’s atomic coordinates and target is a dictionary of the + following scores: + + ..
list-table:: Target + :widths: 20 80 + :header-rows: 1 + + * - Key + - Description + * - score + - + * - fa_atr + - + * - fa_rep + - + * - fa_intra_rep + - + * - lk_nonpolar + - + * - fa_elec_rna_phos_phos + - + * - rna_torsion + - + * - suiteness_bonus + - + * - rna_sugar_close + - + * - fa_stack + - + * - stack_elec + - + * - geom_sol_fast + - + * - hbond_sr_bb_sc + - + * - hbond_lr_bb_sc + - + * - hbond_sc + - + * - ref + - + * - free_suite + - + * - free_2HOprime + - + * - intermol + - + * - other_pose + - + * - loop_close + - + * - linear_chainbreak + - + * - rms + - + * - rms_stem + - + * - time + - + * - N_WC + - + * - N_NWC + - + * - N_BS + - + * - N_BP + - + * - natWC + - + * - natNWC + - + * - natBP + - + * - f_natWC + - + * - f_natNWC + - + * - f_natBP + - + + Parameters + ---------- + root : Union[str, Path] + The root directory of the dataset. + + download : bool, optional + If `True`, download the dataset from the specified source, + by default `False`. + + transform : Union[Callable, Transform, None], optional + The transformation function to be applied to the features, + by default `None`. + + target_transform : Union[Callable, Transform, None], optional + The transformation function to be applied to the targets, + by default `None`. + """ + super().__init__( + root, + "raw/candidates/data", + "https://zenodo.org/record/4961085/files/RSR-raw.tar.gz", + "RSR", + checksum="68830ab0ab95cf3d218785a4e4e7669c", + download=download, + ) + + self._transform_fn = transform + + self._target_transform_fn = target_transform + + def __getitem__(self, index: int) -> Tuple[DataFrame, Dict[str, Tensor]]: + """ + Parameters + ---------- + index : int + The index of the item to retrieve from the dataset. + + Returns + ------- + Tuple[DataFrame, Dict[str, Tensor]] + A tuple containing the features and target of the item. 
+ """ + item = super().__getitem__(index) + + features = DataFrame(**item["atoms"]) + + if self._transform_fn is not None: + features = self._transform_fn(features) + + target = item["scores"] + + for k, v in target.items(): + target[k] = torch.tensor(v) + + if self._target_transform_fn is not None: + target = self._target_transform_fn(target) + + return features, target diff --git a/src/beignet/datasets/_atom3d_smp_dataset.py b/src/beignet/datasets/_atom3d_smp_dataset.py new file mode 100644 index 0000000000..92ac48d11a --- /dev/null +++ b/src/beignet/datasets/_atom3d_smp_dataset.py @@ -0,0 +1,170 @@ +from pathlib import Path +from typing import Callable, Dict, Tuple, Union + +import torch +from pandas import DataFrame +from torch import Tensor + +from beignet.transforms import Transform + +from ._atom3d_dataset import ATOM3DDataset + + +class ATOM3DSMPDataset(ATOM3DDataset): + def __init__( + self, + root: Union[str, Path], + *, + download: bool = False, + transform: Union[Callable, Transform, None] = None, + target_transform: Union[Callable, Transform, None] = None, + ): + """ + ATOM3D Small Molecule Properties (SMP) is a dataset of structures + and energetic, electronic, and thermodynamic properties for 134,000 + stable small organic molecules, obtained from quantum-chemical + calculations. The task is to predict the molecular properties from + the ground-state structure. + + Some molecules have been excluded because they failed consistency + tests or were not properly processed. + + Each sample is a pair of features and a target, where features is + the molecule’s atomic coordinates and target is a dictionary of the + following energetic, electronic, and thermodynamic properties: + + .. 
list-table:: Target + :widths: 20 20 60 + :header-rows: 1 + + * - Key + - Unit + - Description + * - a + - GHz + - Rotational constant A + * - b + - GHz + - Rotational constant B + * - c + - GHz + - Rotational constant C + * - mu + - Debye + - Dipole moment + * - alpha + - Bohr^3 + - Isotropic polarizability + * - homo + - Hartree + - Energy of Highest occupied molecular orbital (HOMO) + * - lumo + - Hartree + - Energy of Lowest unoccupied molecular orbital (LUMO) + * - gap + - Hartree + - Gap, difference between LUMO and HOMO + * - r2 + - Bohr^2 + - Electronic spatial extent + * - zpve + - Hartree + - Zero point vibrational energy + * - u0 + - Hartree + - Internal energy at 0 K + * - u + - Hartree + - Internal energy at 298.15 K + * - h + - Hartree + - Enthalpy at 298.15 K + * - g + - Hartree + - Free energy at 298.15 K + * - cv + - cal/(mol K) + - Heat capacity at 298.15 K + + Parameters + ---------- + root : Union[str, Path] + The root directory of the dataset. + + download : bool, optional + If True, download the dataset from the specified source, + by default `False`. + + transform : Union[Callable, Transform, None], optional + The transformation function to be applied to the features, + by default `None`. + + target_transform : Union[Callable, Transform, None], optional + The transformation function to be applied to the targets, + by default `None`. + """ + super().__init__( + root, + "raw/QM9/data", + "https://zenodo.org/record/4911142/files/SMP-raw.tar.gz", + "SMP", + checksum="52cc7955c0f80f7dd9faf041e171f405", + download=download, + ) + + self._transform_fn = transform + + self._target_transform_fn = target_transform + + def __getitem__( + self, + index: int, + ) -> Tuple[Tuple[DataFrame], Dict[str, Tensor]]: + """ + Parameters + ---------- + index : int + The index of the item to retrieve from the dataset. + + Returns + ------- + Tuple[Tuple[DataFrame], Dict[str, Tensor]] + A tuple containing the features and target of the item.
+ + """ + item = super().__getitem__(index) + + features = DataFrame(**item["atoms"]) + + if self._transform_fn is not None: + features = self._transform_fn(features) + + target = {} + + for k, v in zip( + [ + "a", + "b", + "c", + "mu", + "alpha", + "homo", + "lumo", + "gap", + "r2", + "zpve", + "u0", + "u", + "h", + "g", + "cv", + ], + item["labels"], + strict=False, + ): + target[k] = torch.tensor(v) + + if self._target_transform_fn is not None: + target = self._target_transform_fn(target) + + return features, target diff --git a/src/beignet/datasets/_bindingdb_ic50_dataset.py b/src/beignet/datasets/_bindingdb_ic50_dataset.py new file mode 100644 index 0000000000..deec10232e --- /dev/null +++ b/src/beignet/datasets/_bindingdb_ic50_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class BindingDBIC50Dataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4291560, + suffix="csv", + checksum="md5:a6ca198002c335aa9a30248cf3795413", + x_keys=["X1", "X2"], + y_keys=["Y"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_bindingdb_kd_dataset.py b/src/beignet/datasets/_bindingdb_kd_dataset.py new file mode 100644 index 0000000000..3e51580614 --- /dev/null +++ b/src/beignet/datasets/_bindingdb_kd_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class BindingDBKdDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4291555, + suffix="csv", + checksum="md5:c463f536eeec3f99cdab9365d86e7154", + x_keys=["X1", "X2"], + y_keys=["Y"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_bindingdb_ki_dataset.py b/src/beignet/datasets/_bindingdb_ki_dataset.py new file mode 100644 index 0000000000..f2b2a6eaed --- /dev/null +++ b/src/beignet/datasets/_bindingdb_ki_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class BindingDBKiDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4291556, + suffix="csv", + checksum="md5:187d6c8926c608e24f4469373811806d", + x_keys=["X1", "X2"], + y_keys=["Y"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_broccatelli_p_glycoprotein_inhibition_dataset.py b/src/beignet/datasets/_broccatelli_p_glycoprotein_inhibition_dataset.py new file mode 100644 index 0000000000..ba2bf39208 --- /dev/null +++ b/src/beignet/datasets/_broccatelli_p_glycoprotein_inhibition_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class BroccatelliPGlycoproteinInhibitionDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4259597, + suffix="tsv", + checksum="md5:6915ccf0b5d6b9c8fe4d98cb5759a88a", + x_keys=["Drug"], + y_keys=["Y"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_carbon_mangels_cytochrome_p450_2c9_substrate_dataset.py b/src/beignet/datasets/_carbon_mangels_cytochrome_p450_2c9_substrate_dataset.py new file mode 100644 index 0000000000..319e59b391 --- /dev/null +++ b/src/beignet/datasets/_carbon_mangels_cytochrome_p450_2c9_substrate_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class CarbonMangelsCytochromeP4502C9SubstrateDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4259584, + suffix="tsv", + checksum="md5:3f13c61b816868eb5d7b3b0c61023c04", + x_keys=["Drug"], + y_keys=["Y"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_carbon_mangels_cytochrome_p450_2d6_substrate_dataset.py b/src/beignet/datasets/_carbon_mangels_cytochrome_p450_2d6_substrate_dataset.py new file mode 100644 index 0000000000..71220b90ab --- /dev/null +++ b/src/beignet/datasets/_carbon_mangels_cytochrome_p450_2d6_substrate_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class CarbonMangelsCytochromeP4502D6SubstrateDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4259578, + suffix="tsv", + checksum="md5:d1f45520803a0d47c2056abf8f5548c7", + x_keys=["Drug"], + y_keys=["Y"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_carbon_mangels_cytochrome_p450_3a4_substrate_dataset.py b/src/beignet/datasets/_carbon_mangels_cytochrome_p450_3a4_substrate_dataset.py new file mode 100644 index 0000000000..43e04d2b17 --- /dev/null +++ b/src/beignet/datasets/_carbon_mangels_cytochrome_p450_3a4_substrate_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class CarbonMangelsCytochromeP4503A4SubstrateDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4259581, + suffix="tsv", + checksum="md5:25cfb80ef8f04e035d5944228194ca95", + x_keys=["Drug"], + y_keys=["Y"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_chembl_dataset.py b/src/beignet/datasets/_chembl_dataset.py new file mode 100644 index 0000000000..1e9b10ab71 --- /dev/null +++ b/src/beignet/datasets/_chembl_dataset.py @@ -0,0 +1,39 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class ChEMBLDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + """ + super().__init__( + root=root, + download=download, + identifier=4170965, + suffix="csv", + checksum="md5:b0cedfe468c6331487f7084236944bac", + x_keys=["smiles"], + y_keys=[], + transform=transform, + ) diff --git a/src/beignet/datasets/_chen_sabdab_dataset.py b/src/beignet/datasets/_chen_sabdab_dataset.py new file mode 100644 index 0000000000..7d438e75b3 --- /dev/null +++ b/src/beignet/datasets/_chen_sabdab_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class ChenSAbDabDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. 
+ + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. + """ + super().__init__( + root=root, + download=download, + identifier=4167164, + suffix="tsv", + checksum="md5:3b58b73e0d5371a2ccc865ca045a2061", + x_keys=["X"], + y_keys=["Y"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_clintox_dataset.py b/src/beignet/datasets/_clintox_dataset.py new file mode 100644 index 0000000000..1c8efb1495 --- /dev/null +++ b/src/beignet/datasets/_clintox_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class ClinToxDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4259572, + suffix="tsv", + checksum="md5:e8e7c5ba675129db0161913ba4871834", + x_keys=["Drug"], + y_keys=["Y"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_data_frame_dataset.py b/src/beignet/datasets/_data_frame_dataset.py new file mode 100644 index 0000000000..7f42c6b47c --- /dev/null +++ b/src/beignet/datasets/_data_frame_dataset.py @@ -0,0 +1,46 @@ +from pathlib import Path +from typing import Callable, TypeVar, Union + +from pandas import DataFrame +from torch.utils.data import Dataset + +from beignet.transforms import Transform + +T = TypeVar("T") + + +class DataFrameDataset(Dataset): + _data: DataFrame + + def __init__( + self, + root: Union[str, Path], + *, + transform: Union[Callable, Transform, None] = None, + target_transform: Union[Callable, Transform, None] = None, + ) -> None: + """ + :param root: Root directory where the dataset subdirectory exists or, + if :attr:`download` is ``True``, the directory where the dataset + subdirectory will be created and the dataset downloaded. + + :param transform: A ``Callable`` or ``Transform`` that maps data to + transformed data (default: ``None``). + + :param target_transform: ``Callable`` or ``Transform`` that maps a + target to a transformed target (default: ``None``). 
+ """ + if isinstance(root, str): + root = Path(root).resolve() + + self._root = root + + self._transform_fn = transform + + self._target_transform_fn = target_transform + + def __getitem__(self, index: int) -> T: + return self._data.iloc[index] + + def __len__(self) -> int: + return len(self._data) diff --git a/src/beignet/datasets/_davis_dataset.py b/src/beignet/datasets/_davis_dataset.py new file mode 100644 index 0000000000..d0b746bc6e --- /dev/null +++ b/src/beignet/datasets/_davis_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class DAVISDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=5219748, + suffix="md5:6c7949b81aea69e9d816db88602d771e", + checksum="", + x_keys=["X1", "X2"], + y_keys=["Y"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_disgenet_dataset.py b/src/beignet/datasets/_disgenet_dataset.py new file mode 100644 index 0000000000..050be33ae7 --- /dev/null +++ b/src/beignet/datasets/_disgenet_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class DisGeNETDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4168282, + suffix="disgenet.csv", + checksum="md5:b7efdf1dc006ff04a33bb3a4aec5d746", + x_keys=["X1", "ID2"], + y_keys=["Y"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_drugcomb_dataset.py b/src/beignet/datasets/_drugcomb_dataset.py new file mode 100644 index 0000000000..b0624e4033 --- /dev/null +++ b/src/beignet/datasets/_drugcomb_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class DrugCombDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4215720, + suffix="pkl", + checksum="md5:cfe52eeb5948f63e9c3bc562fc1958c3", + x_keys=[""], + y_keys=[""], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_freesolv_dataset.py b/src/beignet/datasets/_freesolv_dataset.py new file mode 100644 index 0000000000..afeb9d4936 --- /dev/null +++ b/src/beignet/datasets/_freesolv_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class FreeSolvDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4259594, + suffix="tsv", + checksum="md5:2f2a6325ea440b41e22cb14c7775d591", + x_keys=["Drug"], + y_keys=["Y"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_gdsc1_dataset.py b/src/beignet/datasets/_gdsc1_dataset.py new file mode 100644 index 0000000000..8847cde237 --- /dev/null +++ b/src/beignet/datasets/_gdsc1_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class GDSC1Dataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4165726, + suffix="gdsc1.pkl", + checksum="md5:6bee1e2507090559b34ab626e229c0be", + x_keys=["X1", "X2"], + y_keys=["Y"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_gdsc2_dataset.py b/src/beignet/datasets/_gdsc2_dataset.py new file mode 100644 index 0000000000..47b4b81415 --- /dev/null +++ b/src/beignet/datasets/_gdsc2_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class GDSC2Dataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4165727, + suffix="gdsc2.pkl", + checksum="md5:217ccb2c49dc43485924f8678eaf7e34", + x_keys=["X1", "X2"], + y_keys=["Y"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_hou_human_intestinal_absorption_dataset.py b/src/beignet/datasets/_hou_human_intestinal_absorption_dataset.py new file mode 100644 index 0000000000..45986bb1c4 --- /dev/null +++ b/src/beignet/datasets/_hou_human_intestinal_absorption_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class HouHumanIntestinalAbsorptionDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4259591, + suffix="tsv", + checksum="md5:ff67500a5c7b1321114a9d1b4078d92e", + x_keys=["Drug"], + y_keys=["Y"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_huri_dataset.py b/src/beignet/datasets/_huri_dataset.py new file mode 100644 index 0000000000..78347ee41e --- /dev/null +++ b/src/beignet/datasets/_huri_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class HuRIDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4139567, + suffix="huri.tab", + checksum="md5:d934f40f048fc8686c0137c273ceec57", + x_keys=["X1", "X2"], + y_keys=["Y"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_jespersen_iedb_dataset.py b/src/beignet/datasets/_jespersen_iedb_dataset.py new file mode 100644 index 0000000000..5d4801f102 --- /dev/null +++ b/src/beignet/datasets/_jespersen_iedb_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class JespersenIEDBDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4165725, + suffix="pkl", + checksum="md5:6d5b7e005e8f8cafa117c5224698804f", + x_keys=["X1", "X2"], + y_keys=["Y"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_jespersen_pdb_dataset.py b/src/beignet/datasets/_jespersen_pdb_dataset.py new file mode 100644 index 0000000000..1411d38ea5 --- /dev/null +++ b/src/beignet/datasets/_jespersen_pdb_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class JespersenPDBDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4165724, + suffix="pkl", + checksum="md5:78090626dc78bb925a3b65f44dc8e8da", + x_keys=["X1", "X2"], + y_keys=["Y"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_kiba_dataset.py b/src/beignet/datasets/_kiba_dataset.py new file mode 100644 index 0000000000..7f43f8d07d --- /dev/null +++ b/src/beignet/datasets/_kiba_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class KIBADataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=5255037, + suffix="tsv", + checksum="md5:c6fb4d13f07ed8b9b980e71be4893720", + x_keys=["X1", "X2"], + y_keys=["Y"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_lmdb_dataset.py b/src/beignet/datasets/_lmdb_dataset.py new file mode 100644 index 0000000000..752f8cccf0 --- /dev/null +++ b/src/beignet/datasets/_lmdb_dataset.py @@ -0,0 +1,73 @@ +import json +from gzip import GzipFile +from io import BytesIO +from pathlib import Path +from typing import Any, Callable, Dict, Union + +from torch.utils.data import Dataset + +from beignet.transforms import Transform + +try: + import lmdb +except ImportError as error: + raise ImportError( + """ + LMDB datasets require the `lmdb` dependency: + + $ pip install "beignet[lmdb]" + """ + ) from error + + +class LMDBDataset(Dataset): + def __init__( + self, + root: Union[str, Path], + *, + lock: bool = False, + max_readers: int = 1, + meminit: bool = True, + readahead: bool = True, + readonly: bool = True, + transform: Union[Callable, Transform, None] = None, + ): + super().__init__() + + self._root = root + + self._transform_fn = transform + + if isinstance(self._root, str): + self._root = Path(self._root).resolve() + + self._data = lmdb.open( + str(self._root), + lock=lock, + max_readers=max_readers, + meminit=meminit, + readahead=readahead, + readonly=readonly, + ) + + with self._data.begin(write=False) as transaction: + self._size = int(transaction.get(b"num_examples")) + + def __getitem__(self, index: int) -> Dict[str, Any]: + if not 0 <= index < self._size: + raise IndexError(index) + + with self._data.begin(write=False) as transaction: + with GzipFile( + fileobj=BytesIO(transaction.get(str(index).encode())), + mode="rb", + ) as descriptor: + item = json.loads(descriptor.read()) + + if self._transform_fn: + item = self._transform_fn(item) + + return item + + def __len__(self) -> int: + return self._size 
diff --git a/src/beignet/datasets/_lombardo_volume_of_distribution_at_steady_state_dataset.py b/src/beignet/datasets/_lombardo_volume_of_distribution_at_steady_state_dataset.py new file mode 100644 index 0000000000..4292c43558 --- /dev/null +++ b/src/beignet/datasets/_lombardo_volume_of_distribution_at_steady_state_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class LombardoVolumeOfDistributionAtSteadyStateDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4267387, + suffix="tsv", + checksum="md5:268fbc1b70e45c870373b238ffd36313", + x_keys=["X"], + y_keys=["Y"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_ma_bioavailability_dataset.py b/src/beignet/datasets/_ma_bioavailability_dataset.py new file mode 100644 index 0000000000..efd23cb985 --- /dev/null +++ b/src/beignet/datasets/_ma_bioavailability_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class MaBioavailabilityDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4259567, + suffix="tsv", + checksum="md5:e1f3ee03667caf09ee007f4a14bca530", + x_keys=["Drug"], + y_keys=["Y"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_martins_blood_brain_barrier_dataset.py b/src/beignet/datasets/_martins_blood_brain_barrier_dataset.py new file mode 100644 index 0000000000..68e71b2bb7 --- /dev/null +++ b/src/beignet/datasets/_martins_blood_brain_barrier_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class MartinsBloodBrainBarrierDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4259566, + suffix="tsv", + checksum="md5:4c7ddf7260f9573476ba2d4ca877957f", + x_keys=["Drug"], + y_keys=["Y"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_moses_dataset.py b/src/beignet/datasets/_moses_dataset.py new file mode 100644 index 0000000000..dc8e279f48 --- /dev/null +++ b/src/beignet/datasets/_moses_dataset.py @@ -0,0 +1,38 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class MOSESDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4170962, + suffix="moses.tab", + checksum="md5:b684443540f42cbdebb63ad090a1b4b3", + x_keys=["smiles"], + transform=transform, + ) diff --git a/src/beignet/datasets/_ncats_pampa_permeability_dataset.py b/src/beignet/datasets/_ncats_pampa_permeability_dataset.py new file mode 100644 index 0000000000..6119de301a --- /dev/null +++ b/src/beignet/datasets/_ncats_pampa_permeability_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class NCATSPAMPAPermeabilityDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=6695857, + suffix="tsv", + checksum="md5:ec813b5c6f829f92490faf03302d0960", + x_keys=["Drug"], + y_keys=["Y"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_obach_half_life_dataset.py b/src/beignet/datasets/_obach_half_life_dataset.py new file mode 100644 index 0000000000..dec4e27930 --- /dev/null +++ b/src/beignet/datasets/_obach_half_life_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class ObachHalfLifeDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4266799, + suffix="tsv", + checksum="md5:7872345be7e2f62215e91f12ac865ce1", + x_keys=["X"], + y_keys=["Y"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_parquet_dataset.py b/src/beignet/datasets/_parquet_dataset.py new file mode 100644 index 0000000000..f63bebacaf --- /dev/null +++ b/src/beignet/datasets/_parquet_dataset.py @@ -0,0 +1,52 @@ +from pathlib import Path +from typing import Callable, Optional, Sequence, Union + +import pandas + +from beignet.transforms import Transform + +from ._data_frame_dataset import DataFrameDataset + + +class ParquetDataset(DataFrameDataset): + def __init__( + self, + root: Union[str, Path], + path: Union[str, Path], + *, + columns: Optional[Sequence[str]], + target_columns: Optional[Sequence[str]], + transform: Union[Callable, Transform, None] = None, + target_transform: Union[Callable, Transform, None] = None, + **kwargs, + ) -> None: + """ + :param root: Root directory where the dataset subdirectory exists or, + if :attr:`download` is ``True``, the directory where the dataset + subdirectory will be created and the dataset downloaded. + + :param columns: x features of the dataset. items in the dataset are + of the form ((columns), (target_columns)). + + :param target_columns: y features of the dataset. items in the dataset + are of the form ((columns), (target_columns)). + + :param transform: A ``Callable`` or ``Transform`` that maps data to + transformed data (default: ``None``). + + :param target_transform: ``Callable`` or ``Transform`` that maps a + target to a transformed target (default: ``None``). 
+ """ + super().__init__( + root, + transform=transform, + target_transform=target_transform, + ) + + self._path = path + + self._columns = columns + + self._target_columns = target_columns + + self._data = pandas.read_parquet(self._path, **kwargs) diff --git a/src/beignet/datasets/_pdbbind_dataset.py b/src/beignet/datasets/_pdbbind_dataset.py new file mode 100644 index 0000000000..a960886ec4 --- /dev/null +++ b/src/beignet/datasets/_pdbbind_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class PDBbindDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=0, + suffix="", + checksum="", + x_keys=[""], + y_keys=[""], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_qm7_dataset.py b/src/beignet/datasets/_qm7_dataset.py new file mode 100644 index 0000000000..bb3fbd8b86 --- /dev/null +++ b/src/beignet/datasets/_qm7_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class QM7Dataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=6358510, + suffix="pkl", + checksum="md5:d7fb621e931864f547e5f6d362904dc4", + x_keys=[""], + y_keys=[""], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_qm7b_dataset.py b/src/beignet/datasets/_qm7b_dataset.py new file mode 100644 index 0000000000..d14d9f41b5 --- /dev/null +++ b/src/beignet/datasets/_qm7b_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class QM7bDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=6358512, + suffix="pkl", + checksum="md5:9b3e6c8d359ab560d47692a523fb2311", + x_keys=[""], + y_keys=[""], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_qm8_dataset.py b/src/beignet/datasets/_qm8_dataset.py new file mode 100644 index 0000000000..48455992ba --- /dev/null +++ b/src/beignet/datasets/_qm8_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class QM8Dataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=6358513, + suffix="pkl", + checksum="md5:8c8798d0f0d1dd8461f29ffaf0fff9fb", + x_keys=[""], + y_keys=[""], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_qm9_dataset.py b/src/beignet/datasets/_qm9_dataset.py new file mode 100644 index 0000000000..a3394ee0f2 --- /dev/null +++ b/src/beignet/datasets/_qm9_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class QM9Dataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=6179310, + suffix="pkl", + checksum="md5:172bfbd89f7536dfebcfe6ca440538f0", + x_keys=[""], + y_keys=[""], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_random_euler_angle_dataset.py b/src/beignet/datasets/_random_euler_angle_dataset.py index b74fef33dd..bfcb1527a9 100644 --- a/src/beignet/datasets/_random_euler_angle_dataset.py +++ b/src/beignet/datasets/_random_euler_angle_dataset.py @@ -5,7 +5,7 @@ import beignet from beignet.transforms import Transform -from .__random_rotation_dataset import RandomRotationDataset +from ._random_rotation_dataset import RandomRotationDataset class RandomEulerAngleDataset(RandomRotationDataset): diff --git a/src/beignet/datasets/_random_quaternion_dataset.py b/src/beignet/datasets/_random_quaternion_dataset.py index 5cf62b0b3a..87bd00a475 100644 --- a/src/beignet/datasets/_random_quaternion_dataset.py +++ b/src/beignet/datasets/_random_quaternion_dataset.py @@ -5,7 +5,7 @@ import beignet from beignet.transforms import Transform -from .__random_rotation_dataset import RandomRotationDataset +from ._random_rotation_dataset import RandomRotationDataset class RandomQuaternionDataset(RandomRotationDataset): diff --git a/src/beignet/datasets/__random_rotation_dataset.py b/src/beignet/datasets/_random_rotation_dataset.py similarity index 100% rename from src/beignet/datasets/__random_rotation_dataset.py rename to src/beignet/datasets/_random_rotation_dataset.py diff --git a/src/beignet/datasets/_random_rotation_matrix_dataset.py b/src/beignet/datasets/_random_rotation_matrix_dataset.py index a033892e4c..bfdacbee86 100644 --- a/src/beignet/datasets/_random_rotation_matrix_dataset.py +++ b/src/beignet/datasets/_random_rotation_matrix_dataset.py @@ -3,7 +3,7 @@ import torch import beignet -from beignet.datasets.__random_rotation_dataset import RandomRotationDataset +from beignet.datasets._random_rotation_dataset import 
RandomRotationDataset from beignet.transforms import Transform diff --git a/src/beignet/datasets/_random_rotation_vector_dataset.py b/src/beignet/datasets/_random_rotation_vector_dataset.py index d1e0297140..7b5cd2f97c 100644 --- a/src/beignet/datasets/_random_rotation_vector_dataset.py +++ b/src/beignet/datasets/_random_rotation_vector_dataset.py @@ -3,7 +3,7 @@ import torch import beignet -from beignet.datasets.__random_rotation_dataset import RandomRotationDataset +from beignet.datasets._random_rotation_dataset import RandomRotationDataset from beignet.transforms import Transform diff --git a/src/beignet/datasets/_real_database_dataset.py b/src/beignet/datasets/_real_database_dataset.py new file mode 100644 index 0000000000..64d8c03e87 --- /dev/null +++ b/src/beignet/datasets/_real_database_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class REALDatabaseDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=0, + suffix="", + checksum="", + x_keys=[""], + y_keys=[""], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_sabdab_dataset.py b/src/beignet/datasets/_sabdab_dataset.py new file mode 100644 index 0000000000..94e0d45cc4 --- /dev/null +++ b/src/beignet/datasets/_sabdab_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class SAbDabDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4167357, + suffix="csv", + checksum="md5:f4d0dba68859f7ae2a042bd90423b22b", + x_keys=["X1", "X2"], + y_keys=["Y"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_skempi_dataset.py b/src/beignet/datasets/_skempi_dataset.py new file mode 100644 index 0000000000..2b2e81288a --- /dev/null +++ b/src/beignet/datasets/_skempi_dataset.py @@ -0,0 +1,237 @@ +import os.path +from pathlib import Path +from typing import Callable, Union + +import torch +from Bio.PDB import PDBParser +from torch import Tensor + +import beignet.io +from beignet.transforms import Transform + +from ._parquet_dataset import ParquetDataset + + +class SKEMPIDataset(ParquetDataset): + """ + The Structural Kinetic and Energetic database of Mutant Protein + Interactions (SKEMPI) database is a compilation of experimental data on + the thermodynamics of mutations in protein-protein interactions. The + database includes protein names, protein structures from the Protein + Data Bank (PDB), mutation information, and the change in free energy + upon mutation. The change in free energy gives an indication of how the + mutation affects the binding affinity of the two proteins. + """ + + def __init__( + self, + root: Union[str, Path], + *, + download: bool = False, + sequence_transform_fn: Union[Callable, Transform, None] = None, + structure_transform_fn: Union[Callable, Transform, None] = None, + target_transform: Union[Callable, Transform, None] = None, + ) -> None: + """ + :param root: Root directory where the dataset subdirectory exists or, + if :attr:`download` is ``True``, the directory where the dataset + subdirectory will be created and the dataset downloaded. + + :param download: If ``True``, download the dataset to the :attr:`root` + directory (default: ``False``). If the dataset is already + downloaded, it is not redownloaded. 
+ + :param sequence_transform_fn: A ``Callable`` or ``Transform`` that maps + sequences to transformed sequences (default: ``None``). + + :param structure_transform_fn: A ``Callable`` or ``Transform`` that + maps structures to transformed structures (default: ``None``). + + :param target_transform: ``Callable`` or ``Transform`` that maps a + target to a transformed target (default: ``None``). + """ + if isinstance(root, str): + root = Path(root).resolve() + + self._root = root + + if download: + beignet.io.download( + source="s3://beignet-data-dev/designdb/lake/thirdparty/skempi/cc5952a4a37f4f1fbe14ce484a00eb87_0.snappy.parquet", + destination=self._root / "SKEMPI-v2.0", + filename="SKEMPI-v2.0.parquet", + ) + + beignet.io.download_and_extract_archive( + resource="https://life.bsc.es/pid/skempi2/database/download/SKEMPI2_PDBs.tgz", + source=self._root, + destination=self._root, + name="SKEMPI-v2.0.tar.gz", + remove_archive=True, + ) + + super().__init__( + self._root / "SKEMPI-v2.0", + self._root / "SKEMPI-v2.0" / "SKEMPI-v2.0.parquet", + ) + + self._sequence_transform_fn = sequence_transform_fn + + self._structure_transform_fn = structure_transform_fn + + self._target_transform_fn = target_transform + + self._data = self._data.dropna( + subset=[ + "affinity_antigen_sequence", + "affinity_pkd", + "fv_heavy", + "fv_light", + ], + ) + + self._parser = PDBParser() + + self._structure_paths = [*self._root.glob("PDBs/*.pdb")] + + def __getitem__( + self, + index: int, + ) -> (((str, str, str), (Tensor, [str])), (float, ...)): + """ + :param index: index of the record to return. + + :returns: A pair of the form: + + .. math:: + + \\left(\\text{antibodies},\\;\\text{targets}\\right). + + Each antibody in :math:`\\text{antibodies}` is a pair of the form: + + .. math:: + + \\left(\\text{sequences},\\;\\text{structures}\\right). + + :math:`\\text{sequences} `is a :math:`3`-tuple of the form: + + .. 
math:: + + \\left(\\text{VH},\\;\\text{VL},\\;\\text{Ag}\\right) + + where :math:`\\text{VH}` is a ``str`` that represents the + immunoglobulin heavy chain variable region sequence, :math:`\\text{VL}` is a ``str`` that represents the immunoglobulin light chain + variable region sequence, and :math:`\\text{Ag}` is a ``str`` that + represents the antigen sequence. + + An antibody is made up of two heavy chains and two light chains. + Each heavy and light chain has a variable (:math:`V`) region and + a constant (:math:`C`) region. The variable regions of the heavy + and light chains form the antigen-binding site of the antibody. + Each variable region is unique and gives the antibody its + specificity for binding to a particular antigen. The heavy and + light chain variable regions are named for their extensive + sequence variability among different antibodies. This + variability allows the immune system to produce antibodies that + can specifically recognize and bind to a vast array of antigens. + + Antigens are molecules capable of stimulating an immune + response. They are typically proteins or polysaccharides. This + includes components of bacterial cell walls, capsules, pili, + and bacterial flagella, as well as proteins in viruses. + + The immune system recognizes antigens as foreign and mounts an + immune response against them. Antigens are recognized by + specific antibodies, which bind to the antigen. This binding can + neutralize the antigen, mark it for destruction by other immune + cells, or trigger other types of immune responses. Each type of + antibody recognizes and binds to a specific antigen; this + specificity is determined by the variable regions of the + antibody's heavy and light chains. + + :math:`\\text{pKd}` is the negative logarithm of the + dissociation constant (:math:`\\text{Kd}`). The dissociation + constant is a measure of how tightly a ligand (e.g., a drug) + binds to a receptor. 
The smaller the ``Kd`` value, the tighter + or stronger the binding between the ligand and its receptor. + Because :math:`\\text{pKd}` is the negative logarithm of + :math:`\\text{Kd}`, a larger :math:`\\text{pKd}` value therefore + represents stronger binding affinity. The :math:`\\text{pKd}` + value is commonly used in pharmacology and medicinal chemistry + because it allows easier comparison of binding affinities across + different ligand-receptor pairs. It’s an important metric when + assessing the potential efficacy of a drug. + """ + item = super().__getitem__(index) + + sequence = ( + item["fv_heavy"], + item["fv_light"], + item["affinity_antigen_sequence"], + ) + + if self._sequence_transform_fn is not None: + sequence = self._sequence_transform_fn(sequence) + + name, _ = os.path.splitext( + os.path.basename( + self._structure_paths[index], + ), + ) + + structure = self._parser.get_structure( + name, + self._structure_paths[index], + ) + + atomic_coordinates = [] + + residue_names = [] + + atom_names = [] + + alternate_location_indicators = [] + + for atom in [*structure.get_atoms()]: + atomic_coordinates = [ + *atomic_coordinates, + torch.from_numpy(atom.coord), + ] + + ( + _, + _, + residue_name, + atom_name, + alternate_location_indicator, + ) = atom.get_full_id() + + residue_names = [ + *residue_names, + residue_name, + ] + + atom_names = [*atom_names, atom_name] + + alternate_location_indicator, _ = alternate_location_indicator + + alternate_location_indicators = [ + *alternate_location_indicators, + alternate_location_indicator, + ] + + structure = ( + torch.stack(atomic_coordinates), + residue_names, + ) + + if self._structure_transform_fn is not None: + structure = self._structure_transform_fn(structure) + + target = item["affinity_pkd"] + + if self._target_transform_fn is not None: + target = self._target_transform_fn(target) + + return (sequence, structure), target diff --git a/src/beignet/datasets/_tdc_dataset.py 
b/src/beignet/datasets/_tdc_dataset.py new file mode 100644 index 0000000000..28cfca1cf6 --- /dev/null +++ b/src/beignet/datasets/_tdc_dataset.py @@ -0,0 +1,90 @@ +from pathlib import Path +from typing import Callable, List, Tuple, TypeVar + +import pandas +import pooch +from torch.utils.data import Dataset + +from beignet.transforms import Transform + +T = TypeVar("T") + + +class TDCDataset(Dataset): + _x: List[T] + _y: List[T] + + def __init__( + self, + root: str | Path, + download: bool = False, + *, + identifier: int, + suffix: str, + checksum: str, + x_keys: List[str], + y_keys: List[str] | None = None, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + super().__init__() + + if isinstance(root, str): + root = Path(root) + + if download: + pooch.retrieve( + f"https://dataverse.harvard.edu/api/access/datafile/{identifier}", + fname=f"{self.__class__.__name__}.{suffix}", + known_hash=checksum, + path=root / self.__class__.__name__, + progressbar=True, + ) + + path = root / self.__class__.__name__ / f"{self.__class__.__name__}.{suffix}" + + match path.suffix: + case ".csv": + self._data = pandas.read_csv(path) + case ".pkl": + self._data = pandas.read_pickle(path) + case ".tab" | ".tsv": + self._data = pandas.read_csv(path, sep="\t") + case _: + raise ValueError + + self._x_keys = x_keys + self._y_keys = y_keys + + self.transform = transform + self.target_transform = target_transform + + self._x = self._data[self._x_keys].apply(tuple, axis=1) + + if self._y_keys is not None: + self._y = self._data[self._y_keys].apply(tuple, axis=1) + + def __getitem__(self, index: int) -> Tuple[T, T]: + x = self._x[index] + + if len(x) == 1: + x = x[0] + + if self.transform is not None: + x = self.transform(x) + + if self._y_keys is None: + return x + + y = self._y[index] + + if len(y) == 1: + y = y[0] + + if self.target_transform is not None: + y = self.target_transform(y) + + return x, y + + def __len__(self) -> int: 
+ return len(self._data) diff --git a/src/beignet/datasets/_therapeutic_antibody_profiler_dataset.py b/src/beignet/datasets/_therapeutic_antibody_profiler_dataset.py new file mode 100644 index 0000000000..b5900fbac3 --- /dev/null +++ b/src/beignet/datasets/_therapeutic_antibody_profiler_dataset.py @@ -0,0 +1,50 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class TherapeuticAntibodyProfilerDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4167113, + suffix="tsv", + checksum="md5:0a1b07fe1bdc9f67636f72878097841e", + x_keys=["X"], + y_keys=[ + "CDR_Length", + "PNC", + "PPC", + "PSH", + "SFvCSP", + ], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_tox21_dataset.py b/src/beignet/datasets/_tox21_dataset.py new file mode 100644 index 0000000000..3d43b5ebc5 --- /dev/null +++ b/src/beignet/datasets/_tox21_dataset.py @@ -0,0 +1,57 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class Tox21Dataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4259612, + suffix="tsv", + checksum="md5:6f926279d60d413f0524894fdcb9ba5e", + x_keys=["X"], + y_keys=[ + "NR-AR", + "NR-AR-LBD", + "NR-AhR", + "NR-Aromatase", + "NR-ER", + "NR-ER-LBD", + "NR-PPAR-gamma", + "SR-ARE", + "SR-ATAD5", + "SR-HSE", + "SR-MMP", + "SR-p53", + ], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_toxcast_dataset.py b/src/beignet/datasets/_toxcast_dataset.py new file mode 100644 index 0000000000..219cedb717 --- /dev/null +++ b/src/beignet/datasets/_toxcast_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class ToxCastDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4259613, + suffix="", + checksum="md5:a0791c8232b86fdb657f714ffa05e92a", + x_keys=[""], + y_keys=[""], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_uspto_dataset.py b/src/beignet/datasets/_uspto_dataset.py new file mode 100644 index 0000000000..47cefb192b --- /dev/null +++ b/src/beignet/datasets/_uspto_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class USPTODataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4171642, + suffix="csv", + checksum="md5:818b06cd7dff707d5ae2c82109ff8668", + x_keys=["reactant"], + y_keys=["product"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_uspto_reaction_product_dataset.py b/src/beignet/datasets/_uspto_reaction_product_dataset.py new file mode 100644 index 0000000000..eb30130afa --- /dev/null +++ b/src/beignet/datasets/_uspto_reaction_product_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class USPTOReactionProductDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=0, + suffix="", + checksum="", + x_keys=[""], + y_keys=[""], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_veith_cytochrome_p450_1a2_inhibition_dataset.py b/src/beignet/datasets/_veith_cytochrome_p450_1a2_inhibition_dataset.py new file mode 100644 index 0000000000..d422a7c355 --- /dev/null +++ b/src/beignet/datasets/_veith_cytochrome_p450_1a2_inhibition_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class VeithCytochromeP4501A2InhibitionDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4259573, + suffix="tsv", + checksum="md5:ab58d48970ff880fd5a03f3f6eaadb76", + x_keys=["Drug"], + y_keys=["Y"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_veith_cytochrome_p450_2c19_inhibition_dataset.py b/src/beignet/datasets/_veith_cytochrome_p450_2c19_inhibition_dataset.py new file mode 100644 index 0000000000..c9457f8d6d --- /dev/null +++ b/src/beignet/datasets/_veith_cytochrome_p450_2c19_inhibition_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class VeithCytochromeP4502C19InhibitionDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4259576, + suffix="tsv", + checksum="md5:fe0c4420effb5df2417fa9c9a2ba07ae", + x_keys=["Drug"], + y_keys=["Y"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_veith_cytochrome_p450_2c9_inhibition_dataset.py b/src/beignet/datasets/_veith_cytochrome_p450_2c9_inhibition_dataset.py new file mode 100644 index 0000000000..f1ba09c6bc --- /dev/null +++ b/src/beignet/datasets/_veith_cytochrome_p450_2c9_inhibition_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class VeithCytochromeP4502C9InhibitionDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4259577, + suffix="tsv", + checksum="md5:87d21d2666e8e2bfc76f7d693e060c0c", + x_keys=["Drug"], + y_keys=["Y"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_veith_cytochrome_p450_2d6_inhibition_dataset.py b/src/beignet/datasets/_veith_cytochrome_p450_2d6_inhibition_dataset.py new file mode 100644 index 0000000000..a9de416ed9 --- /dev/null +++ b/src/beignet/datasets/_veith_cytochrome_p450_2d6_inhibition_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class VeithCytochromeP4502D6InhibitionDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4259580, + suffix="tsv", + checksum="md5:9f82eae1ecccec93c8fc4249955e8694", + x_keys=["Drug"], + y_keys=["Y"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_veith_cytochrome_p450_3a4_inhibition_dataset.py b/src/beignet/datasets/_veith_cytochrome_p450_3a4_inhibition_dataset.py new file mode 100644 index 0000000000..2cf9bd0a95 --- /dev/null +++ b/src/beignet/datasets/_veith_cytochrome_p450_3a4_inhibition_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class VeithCytochromeP4503A4InhibitionDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4259582, + suffix="tsv", + checksum="md5:73258e31495abd95072a6e06acbee83a", + x_keys=["Drug"], + y_keys=["Y"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_wang_effective_permeability_dataset.py b/src/beignet/datasets/_wang_effective_permeability_dataset.py new file mode 100644 index 0000000000..631c527ddc --- /dev/null +++ b/src/beignet/datasets/_wang_effective_permeability_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class WangEffectivePermeabilityDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4259569, + suffix="tsv", + checksum="md5:11681ff33d65be3a751a3fb0e45fa1a6", + x_keys=["Drug"], + y_keys=["Y"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_zhu_acute_toxicity_ld50_dataset.py b/src/beignet/datasets/_zhu_acute_toxicity_ld50_dataset.py new file mode 100644 index 0000000000..538c03356b --- /dev/null +++ b/src/beignet/datasets/_zhu_acute_toxicity_ld50_dataset.py @@ -0,0 +1,44 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class ZhuAcuteToxicityLD50Dataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + target_transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. + + target_transform : Callable | Transform | None + Transforms the target. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4267146, + suffix="tsv", + checksum="md5:d655bc7921566e84713aeb91b3298526", + x_keys=["X"], + y_keys=["Y"], + transform=transform, + target_transform=target_transform, + ) diff --git a/src/beignet/datasets/_zinc_dataset.py b/src/beignet/datasets/_zinc_dataset.py new file mode 100644 index 0000000000..174687c81b --- /dev/null +++ b/src/beignet/datasets/_zinc_dataset.py @@ -0,0 +1,39 @@ +from pathlib import Path +from typing import Callable + +from beignet.transforms import Transform + +from ._tdc_dataset import TDCDataset + + +class ZINCDataset(TDCDataset): + def __init__( + self, + root: str | Path, + *, + download: bool = False, + transform: Callable | Transform | None = None, + ): + r""" + Parameters + ---------- + root : str | Path + Root directory of dataset. + + download: bool + If `True`, downloads the dataset to the root directory. If dataset + already exists, it is not redownloaded. Default, `False`. + + transform : Callable | Transform | None + Transforms the input. 
+ """ + super().__init__( + root=root, + download=download, + identifier=4170963, + suffix="tsv", + checksum="md5:9e4754d72db297d496def3498a926979", + x_keys=["smiles"], + y_keys=[], + transform=transform, + ) diff --git a/tests/beignet/test__quaternion_slerp.py b/tests/beignet/test__quaternion_slerp.py index a41f037a6e..e06cc48a6e 100644 --- a/tests/beignet/test__quaternion_slerp.py +++ b/tests/beignet/test__quaternion_slerp.py @@ -1,9 +1,9 @@ -import beignet -import hypothesis.strategies -import numpy -import torch -from scipy.spatial.transform import Rotation, Slerp - +# import beignet +# import hypothesis.strategies +# import numpy +# import torch +# from scipy.spatial.transform import Rotation, Slerp +# # def test_slerp(): # # t = 0 # torch.testing.assert_close( @@ -70,81 +70,81 @@ # [1, -1], # ), # ) - - -@hypothesis.strategies.composite -def slerp_parameters(f): - n = f( - hypothesis.strategies.integers( - min_value=2, - max_value=8, - ), - ) - - times = numpy.sort( - f( - hypothesis.strategies.lists( - hypothesis.strategies.floats( - allow_infinity=False, - allow_nan=False, - ), - min_size=n, - max_size=n, - unique=True, - ), - ), - ) - - min_value = numpy.min(times) - max_value = numpy.max(times) - - input = numpy.sort( - f( - hypothesis.strategies.lists( - hypothesis.strategies.floats( - min_value=min_value, - max_value=max_value, - ), - min_size=1, - max_size=8, - unique=True, - ), - ), - ) - - rotations = f( - hypothesis.strategies.lists( - hypothesis.strategies.lists( - hypothesis.strategies.floats( - numpy.finfo(numpy.float32).eps, - 1.0, - ), - min_size=4, - max_size=4, - ), - min_size=n, - max_size=n, - ), - ) - - rotations = Rotation.from_quat(rotations) - - return [ - [ - torch.from_numpy(input), - torch.from_numpy(times), - torch.from_numpy(rotations.as_quat(canonical=True)), - ], - torch.from_numpy( - Slerp(times, rotations)(input).as_quat(canonical=True), - ), - ] - - -@hypothesis.given(slerp_parameters()) -def test_slerp_properties(data): - 
parameters, expected_rotations = data - - torch.testing.assert_close( - beignet.quaternion_slerp(*parameters), expected_rotations - ) +# +# +# @hypothesis.strategies.composite +# def slerp_parameters(f): +# n = f( +# hypothesis.strategies.integers( +# min_value=2, +# max_value=8, +# ), +# ) +# +# times = numpy.sort( +# f( +# hypothesis.strategies.lists( +# hypothesis.strategies.floats( +# allow_infinity=False, +# allow_nan=False, +# ), +# min_size=n, +# max_size=n, +# unique=True, +# ), +# ), +# ) +# +# min_value = numpy.min(times) +# max_value = numpy.max(times) +# +# input = numpy.sort( +# f( +# hypothesis.strategies.lists( +# hypothesis.strategies.floats( +# min_value=min_value, +# max_value=max_value, +# ), +# min_size=1, +# max_size=8, +# unique=True, +# ), +# ), +# ) +# +# rotations = f( +# hypothesis.strategies.lists( +# hypothesis.strategies.lists( +# hypothesis.strategies.floats( +# numpy.finfo(numpy.float32).eps, +# 1.0, +# ), +# min_size=4, +# max_size=4, +# ), +# min_size=n, +# max_size=n, +# ), +# ) +# +# rotations = Rotation.from_quat(rotations) +# +# return [ +# [ +# torch.from_numpy(input), +# torch.from_numpy(times), +# torch.from_numpy(rotations.as_quat(canonical=True)), +# ], +# torch.from_numpy( +# Slerp(times, rotations)(input).as_quat(canonical=True), +# ), +# ] +# +# +# @hypothesis.given(slerp_parameters()) +# def test_slerp_properties(data): +# parameters, expected_rotations = data +# +# torch.testing.assert_close( +# beignet.quaternion_slerp(*parameters), expected_rotations +# )