diff --git a/.commitlint.rules.js b/.commitlint.rules.js new file mode 100644 index 00000000..bb85d825 --- /dev/null +++ b/.commitlint.rules.js @@ -0,0 +1,13 @@ +module.exports = { + rules: { + "header-max-length": [2, "always", 65], + "subject-case": [0, "always", "sentence-case"], + "scope-enum": [2, "always", ["all", "core", "fields", "galaxies", + "lensing", "math", "observations", "points", + "shapes", "shells", "user"]], + "scope-case": [0, "always", "lower-case"], + "type-enum": [2, "always", ["API", "BUG", "DEP", "DEV", "DOC", "ENH", + "MNT", "REV", "STY", "TST", "TYP", "REL"]], + "type-case": [0, "always", "upper-case"], + } +} diff --git a/.flake8 b/.flake8 new file mode 100644 index 00000000..e7866dee --- /dev/null +++ b/.flake8 @@ -0,0 +1,2 @@ +[flake8] +ignore = E226,E501,E741 diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 5ace4600..9a11bdb2 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -4,3 +4,5 @@ updates: directory: "/" schedule: interval: "weekly" + commit-message: + prefix: "DEV" diff --git a/.github/test-constraints.txt b/.github/test-constraints.txt new file mode 100644 index 00000000..70a2563a --- /dev/null +++ b/.github/test-constraints.txt @@ -0,0 +1,2 @@ +--prefer-binary +--only-binary numpy,scipy,healpy,healpix diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml deleted file mode 100644 index 0298b493..00000000 --- a/.github/workflows/build.yml +++ /dev/null @@ -1,17 +0,0 @@ -name: build -on: - push: - branches: - - main - pull_request: - branches: - - main -jobs: - build: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - run: pipx run build - - uses: actions/upload-artifact@v3 - with: - path: dist/*.tar.gz diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml deleted file mode 100644 index d7b956ae..00000000 --- a/.github/workflows/publish.yml +++ /dev/null @@ -1,15 +0,0 @@ -name: publish -on: - release: - types: - - published -jobs: - 
publish: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - run: pipx run build - - uses: pypa/gh-action-pypi-publish@v1.6.4 - with: - user: __token__ - password: ${{ secrets.pypi_token }} diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml new file mode 100644 index 00000000..ed75b134 --- /dev/null +++ b/.github/workflows/pull_request.yml @@ -0,0 +1,28 @@ +name: Pull Request + +on: + pull_request: + branches: + - main + types: + - opened + - edited + - synchronize + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + lint-pr: + name: Formatting + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: CondeNast/conventional-pull-request-action@v0.2.0 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + commitlintRulesPath: ".commitlint.rules.js" + commitTitleMatch: false + ignoreCommits: true diff --git a/.github/workflows/pull_request_review.yml b/.github/workflows/pull_request_review.yml new file mode 100644 index 00000000..4828fd83 --- /dev/null +++ b/.github/workflows/pull_request_review.yml @@ -0,0 +1,19 @@ +name: Pull Request Review + +on: + pull_request_review: + branches: + - main + types: + - submitted + +jobs: + approved: + name: Approved + if: github.event.review.state == 'approved' + runs-on: ubuntu-latest + permissions: + pull-requests: write + steps: + - name: Add Reviewed-By + uses: ntessore/add-reviewed-by-action@v1 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 00000000..98038217 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,20 @@ +name: Release + +on: + release: + types: + - published + +jobs: + publish: + name: Publish on PyPI + runs-on: ubuntu-latest + environment: + name: publish + url: https://pypi.org/p/glass + permissions: + id-token: write + steps: + - uses: actions/checkout@v4 + - run: pipx run build + - uses: pypa/gh-action-pypi-publish@v1.8.14 
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 00000000..a8b5636e --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,42 @@ +name: Test + +on: + push: + branches: + - main + pull_request: + branches: + - main + +jobs: + style: + name: Style + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - run: pipx run flake8 glass + tests: + name: Tests + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ['3.7', '3.8', '3.9', '3.10', '3.11', '3.12'] + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + - run: pip install -c .github/test-constraints.txt '.[test]' + - run: pytest --pyargs glass + build: + name: Build + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - run: pipx run build + docs: + name: Docs + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - run: pipx run --spec '.[docs]' sphinx-build -W -b html docs _build/html diff --git a/.github/workflows/tox.yml b/.github/workflows/tox.yml deleted file mode 100644 index bf7326c4..00000000 --- a/.github/workflows/tox.yml +++ /dev/null @@ -1,31 +0,0 @@ -name: tox -on: - push: - branches: - - main - pull_request: - branches: - - main -jobs: - tox: - strategy: - matrix: - toxenv: - - flake8 - - test - - docs - name: ${{ matrix.toxenv }} - runs-on: ubuntu-latest - steps: - - name: Checkout Repository - uses: actions/checkout@v3 - - name: Install Python - uses: actions/setup-python@v4 - with: - python-version: '3.7' - - name: Install tox - run: | - pip install tox - - name: Run ${{ matrix.toxenv }} - run: | - tox -e ${{ matrix.toxenv }} diff --git a/.gitignore b/.gitignore index 71d36005..e681196d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,8 @@ +glass/_version.py + .*.swp .DS_Store __pycache__ docs/_build -.tox build dist diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 63486184..1b001646 100644 --- 
a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,4 +2,6 @@ repos: - repo: https://github.com/pycqa/flake8 rev: 4.0.1 hooks: - - id: flake8 \ No newline at end of file + - id: flake8 + additional_dependencies: + - flake8-print diff --git a/.readthedocs.yml b/.readthedocs.yml index 0f581ebb..98d9e442 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -1,5 +1,10 @@ version: 2 +build: + os: ubuntu-22.04 + tools: + python: "3.11" + python: install: - method: pip diff --git a/CHANGELOG.md b/CHANGELOG.md index 822309df..a9473447 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,8 +5,77 @@ All notable changes to the project are documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com). -[Unreleased] ------------- +[2023.7] (1 Aug 2023) +---------------------- + +### Added + +* Function `getcl()` to return angular power spectra by index from + a list using GLASS ordering. +* New `linear_windows()` and `cubic_windows()` window functions for + shells. + +### Changed + +* The `gaussian_phz()` function now accepts bounds using `lower=` + and `upper=` keyword parameters. +* The `partition()` function now returns an array of weights to + approximate the given function by the windows. 
+ + +[2023.6] (30 Jun 2023) +----------------------- + +### Added + +- `deflect()` applies deflections to positions +- `from_convergence()` returns other lensing fields given the convergence +- A new `glass.ext` namespace, reserved for extensions + +### Changed + +- The `glass` module is no longer a namespace package +- The point sampling functions `positions_from_delta()` and + `uniform_positions()` now return an iterator +- `ellipticity_gaussian()` and `ellipticity_intnorm()` accept array inputs +- Use pyproject.toml for packaging + +### Deprecated + +- `shear_from_convergence()` is deprecated in favour of `from_convergence()` + +### Removed + +- The `glass.all` meta-module is no longer necessary + +### Fixed + +- Incorrect extrapolation in `glass.core.array.trapz_product()`, causing a bug + in `glass.points.effective_bias()` + + +[2023.5] (31 May 2023) +----------------------- + +### Added + +- Allow dimensional input to the sampling functions in `glass.points` (#80) +- The `redshifts_from_nz()` function supports `count` arrays (#83) + +### Changed + +- Position sampling returns counts alongside points (#80) +- `redshifts_from_nz()` no longer returns `gal_pop` (#83) +- Move core functionality that is used by other, user-facing modules into the + `glass.core` module (#88) + +### Removed + +- Remove profiling functions (#89) + + +[2023.2] - 1 Mar 2023 +--------------------- ### Added @@ -46,13 +115,16 @@ based on [Keep a Changelog](https://keepachangelog.com). `glass.shells` module. 
-[2023.1] - 2023-01-31 ---------------------- +[2023.1] - 31 Jan 2023 +---------------------- ### Added - Initial wide release for GLASS paper -[Unreleased]: https://github.com/glass-dev/glass/compare/v2023.1...HEAD +[2023.7]: https://github.com/glass-dev/glass/compare/v2023.6...v2023.7 +[2023.6]: https://github.com/glass-dev/glass/compare/v2023.5...v2023.6 +[2023.5]: https://github.com/glass-dev/glass/compare/v2023.2...v2023.5 +[2023.2]: https://github.com/glass-dev/glass/compare/v2023.1...v2023.2 [2023.1]: https://github.com/glass-dev/glass/releases/tag/v2023.1 diff --git a/CITATION.md b/CITATION.md index 64f8444c..8641c686 100644 --- a/CITATION.md +++ b/CITATION.md @@ -4,23 +4,26 @@ Citation If you use GLASS simulations or the GLASS library in your research, please cite the GLASS paper in your publications. -Here is the bibliography entry from [NASA/ADS](https://ui.adsabs.harvard.edu/abs/2023arXiv230201942T): +Here is the bibliography entry from [NASA/ADS]: ```bib -@ARTICLE{2023arXiv230201942T, - author = {{Tessore}, Nicolas and {Loureiro}, Arthur and {Joachimi}, Benjamin and {von Wietersheim-Kramsta}, Maximilian}, +@ARTICLE{2023OJAp....6E..11T, + author = {{Tessore}, Nicolas and {Loureiro}, Arthur and {Joachimi}, Benjamin and {von Wietersheim-Kramsta}, Maximilian and {Jeffrey}, Niall}, title = "{GLASS: Generator for Large Scale Structure}", - journal = {arXiv e-prints}, + journal = {The Open Journal of Astrophysics}, keywords = {Astrophysics - Cosmology and Nongalactic Astrophysics}, year = 2023, - month = feb, - eid = {arXiv:2302.01942}, - pages = {arXiv:2302.01942}, - doi = {10.48550/arXiv.2302.01942}, + month = mar, + volume = {6}, + eid = {11}, + pages = {11}, + doi = {10.21105/astro.2302.01942}, archivePrefix = {arXiv}, eprint = {2302.01942}, primaryClass = {astro-ph.CO}, - adsurl = {https://ui.adsabs.harvard.edu/abs/2023arXiv230201942T}, + adsurl = {https://ui.adsabs.harvard.edu/abs/2023OJAp....6E..11T}, adsnote = {Provided by the SAO/NASA Astrophysics 
Data System} } ``` + +[NASA/ADS]: https://ui.adsabs.harvard.edu/abs/2023OJAp....6E..11T diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 0ec4bd8d..cad246b3 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,31 +1,84 @@ Contributing guidelines ======================= -Committing ----------- +Workflow +-------- + +Every change to the repository should come out of an issue where the change is +discussed. + +[Pull requests](#pull-requests) should always follow from the discussion in an +existing issue. The only exception are minor, obvious changes such as fixing +typos in the documentation. + +The discussion in a pull request should only be about the low-level details of +its implementation. All high-level, conceptual discussion belongs to the issue +to which the pull request refers. + + +Pull requests +------------- + +Pull requests to the `main` branch should have titles of the following form: + + TYPE: Subject line + +The title can optionally refer to the module which is being changed: + + TYPE(module): Subject line + +The `TYPE` prefix should indicate the nature of the change and must be taken +from the following list: -Commits to the `main` branch should have messages that start with one of the -following prefixes to indicate the nature of the commit: + API -- an (incompatible) API change + BUG -- bug fix + DEP -- deprecate something, or remove a deprecated object + DEV -- development infrastructure (tools, packaging, etc.) + DOC -- documentation + ENH -- enhancement + MNT -- maintenance commit (refactoring, typos, etc.) + REV -- revert an earlier commit + STY -- style fix (whitespace, PEP8) + TST -- addition or modification of tests + TYP -- static typing + REL -- related to releasing GLASS - API: an (incompatible) API change - BUG: bug fix - DEP: deprecate something, or remove a deprecated object - DOC: documentation - ENH: enhancement - INF: project infrastructure (dev tools, packaging, etc.) - MNT: maintenance commit (refactoring, typos, etc.) 
- REV: revert an earlier commit - STY: style fix (whitespace, PEP8) - TST: addition or modification of tests - TYP: static typing - REL: related to releasing GLASS +The optional `module` tag should indicate which modules are affected by the +change, and refer to an existing module name. -Note that these apply to a commit that makes it into the `main` branch; for a -pull request on GitHub, that is the eventually squashed and merged commit. -The individual commits in a pull request can have arbitrary commit messages. +The body of the pull request should contain a description of the changes, and +any relevant details or caveats of the implementation. -Pull requests on GitHub should have a label that matches the above prefixes, in -addition to any other applicable label (e.g. affected modules). +The pull request should not repeat or summarise the discussion of its +associated issue. Instead, it should link to the issue using git's so-called +"trailers". These are lines of the form `key: value` which are at the end of +the pull request description, separated from the message body by a blank line. + +To generically refer to an issue without any further action, use `Refs` and +one or more GitHub issue numbers: + + Refs: #12 + Refs: #25, #65 + +To indicate that the pull request shall close an open issue, use `Closes` and +a single GitHub issue number: + + Closes: #17 + +Changelog entries are collected using the following trailers, and later parsed +into the [changelog](CHANGELOG.md) for the next release: + + Added: Some new feature + Changed: Some change in existing functionality + Deprecated: Some soon-to-be removed feature + Removed: Some now removed feature + Fixed: Some bug fix + Security: Some vulnerability was fixed + +You can use any of the other common git trailers. 
In particular, you can use +`Cc` to notify others of your pull request via their GitHub user names: + + Cc: @octocat Versioning @@ -34,14 +87,8 @@ Versioning The target is to have a new *GLASS* release at the beginning of each month, as long as there have been changes. -As soon as a new version has been released, the package information on the -`main` branch should be updated to the in-development version number -`yyyy.mm.dev0`. - -Each breaking change should increment the in-development version number, e.g. -from `.dev0` to `.dev1`. This is so that extension packages can catch up to -the core library at their own pace, and depend on the correct in-development -version. +The current version number is automatically inferred from the last release +(i.e. git tag), subsequent unreleased commits, and local changes, if any. Releasing @@ -50,11 +97,9 @@ Releasing To release a new version of *GLASS*, there should be a commit titled `REL: glass yyyy.mm` that includes the following changes: -* The version of the `glass` core library is changed from `yyyy.mm.devN` to - `yyyy.mm`. -* The current `Unreleased` section in the [changelog](CHANGELOG.md) is renamed - to `yyyy.mm (DD Mon YYYY)` and a new "Unreleased" section is started. The - links to changesets at the bottom of the file have to be updated accordingly. +* The changelog trailers since the last release are parsed into the + [changelog](CHANGELOG.md) under a section titled `[yyyy.mm] (DD Mon YYYY)`. + A new link to the changeset is added at the bottom of the file. * The [release notes](docs/manual/releases.rst) are updated with the new version. The release notes should translate the changelog entries into prose that can be understood by non-developer users of the code. If there @@ -68,10 +113,6 @@ the release should be a copy of its release note. Creating the release will automatically start the build process that uploads Python packages for the new version to PyPI. 
-Immediately after the release has been created, a new commit should increase -the minor version number and start a new development version (see -[versioning](#versioning)). - If any *GLASS* extension packages depend on the new release, new versions of these packages should be produced as soon as the new release is published to PyPI. diff --git a/README.md b/README.md index bead33f8..819f74c8 100644 --- a/README.md +++ b/README.md @@ -5,8 +5,8 @@ [![Documentation](https://readthedocs.org/projects/glass/badge/?version=latest)](https://glass.readthedocs.io/en/latest/) [![PyPI](https://img.shields.io/pypi/v/glass)](https://pypi.org/project/glass) [![arXiv](https://img.shields.io/badge/arXiv-2302.01942-red)](https://arxiv.org/abs/2302.01942) -[![adsabs](https://img.shields.io/badge/ads-2023arXiv230201942T-blueviolet)](https://ui.adsabs.harvard.edu/abs/2023arXiv230201942T) -[![doi](https://img.shields.io/badge/doi-10.48550/arXiv.2302.01942-blue)](https://dx.doi.org/10.48550/arXiv.2302.01942) +[![adsabs](https://img.shields.io/badge/ads-2023OJAp....6E..11T-blueviolet)](https://ui.adsabs.harvard.edu/abs/2023OJAp....6E..11T) +[![doi](https://img.shields.io/badge/doi-10.21105/astro.2302.01942-blue)](https://dx.doi.org/10.21105/astro.2302.01942) [![Slack](https://img.shields.io/badge/join-Slack-4A154B)](https://glass-dev.github.io/slack) This is the core library for GLASS, the Generator for Large Scale Structure. @@ -44,8 +44,20 @@ e.g. a design decision or API change, you can use our [Discussions] page. We also have a public [Slack workspace] for discussions about the project. +To keep up with new GLASS releases, you can receive GitHub release +notifications from this repository. Alternatively, you can subscribe to our +announcement mailing list, which only receives 1 email/release. 
To subscribe, +use the [mailing list page], or send an email to `listserv@jiscmail.ac.uk` with +any subject and the following message body: + + subscribe glass + +where `` is your full name, or `ANONYMOUS` if you prefer. You will +be sent a confirmation email in return. + [documentation]: https://glass.readthedocs.io/ [examples]: https://glass.readthedocs.io/projects/examples/ [Discussions]: https://github.com/orgs/glass-dev/discussions [Slack workspace]: https://glass-dev.github.io/slack +[mailing list page]: https://jiscmail.ac.uk/lists/GLASS.html diff --git a/docs/manual/installation.rst b/docs/manual/installation.rst index 792925ea..02cf1555 100644 --- a/docs/manual/installation.rst +++ b/docs/manual/installation.rst @@ -61,3 +61,19 @@ There currently are a fair amount of breaking changes between *GLASS* releases. For production use, it is therefore **strongly** recommended that you install *GLASS* in a clean environment (venv, conda, etc.) and **pin the GLASS version for the duration of your project**. + + +Announcements +------------- + +To keep up with new GLASS releases, you can subscribe to our announcement +mailing list, which only receives 1 email per release. To subscribe, use the +`mailing list page`__, or send an email to ``listserv@jiscmail.ac.uk`` with any +subject and the following message body:: + + subscribe glass + +where ```` is your full name, or ``ANONYMOUS`` if you prefer. You +will be sent a confirmation email in return. + +__ https://jiscmail.ac.uk/lists/GLASS.html diff --git a/docs/manual/releases.rst b/docs/manual/releases.rst index 52e588ee..1d9a1f27 100644 --- a/docs/manual/releases.rst +++ b/docs/manual/releases.rst @@ -1,16 +1,136 @@ -============= Release notes ============= These notes document the changes between individual *GLASS* releases. 
-2023.1 (31 Jan 2023) -==================== +2023.7 (1 Aug 2023) +------------------- + +* New radial window functions :func:`~glass.shells.linear_windows()` and + :func:`~glass.shells.cubic_windows()`, which correspond to linear and cubic + spline interpolation of radial functions, respectively. These are + overlapping window functions, and it has been difficult to obtain accurate + matter power spectra so far. + +* The :func:`~glass.shells.partition()` function now returns an array of + weights to approximate a given function by the window functions. This is + necessary to obtain an accurate fit of redshift distributions by overlapping + window functions. For example, to get the array of galaxy densities in each + shells from ``dndz``, one would now do:: + + ngal = partition(z, dndz, shells) + +* A new function :func:`~glass.fields.getcl()` was added to return angular + power spectra by index from a list using GLASS ordering. + +* The :func:`~glass.galaxies.gaussian_phz()` function now accepts bounds using + `lower=` and `upper=` keyword parameters. + + +2023.6 (30 Jun 2023) +-------------------- + +- There is some support for simulating the deflections due to weak + gravitational lensing: + + - The :func:`~glass.lensing.deflect` function applies deflections to + positions. + + - The :func:`~glass.lensing.from_convergence` function returns one or more + other lensing fields given the convergence. + + - The ``shear_from_convergence()`` function is deprecated in favour of + ``from_convergence()``. + +- The ``glass`` module is no longer a namespace package. The new ``glass.ext`` + namespace is reserved for extensions instead. This is done to follow best + practices, so that a bad extension can no longer break all of *GLASS* by + mistake. The ``glass.all`` meta-module is no longer necessary. + +- The point sampling functions :func:`~glass.points.positions_from_delta` and + :func:`~glass.points.uniform_positions` now return an iterator over points. 
+ This has lead to orders-of-magnitude improvements in memory use and + performance when simulating galaxies at Euclid/LSST densities. + +- The ellipticity sampling functions :func:`~glass.shapes.ellipticity_gaussian` + and :func:`~glass.shapes.ellipticity_intnorm` accept array inputs. + +- A bug causing incorrect results from :func:`~glass.points.effective_bias` has + been fixed. + + +2023.5 (31 May 2023) +-------------------- -Added ------ +- The point sampling functions in :mod:`glass.points` now accept extra + dimensions, and will broadcast leading axes across their inputs. They also + return an additional scalar or array with the counts of sampled galaxies. + +- The redshift sampling function :func:`glass.galaxies.redshifts_from_nz` now + supports array input for the ``counts`` argument. It accepts e.g. the number + of galaxies returned by the position sampling. + +- The profiling functionality in :mod:`glass.user` was removed in favour of + external packages. + + +2023.2 (1 Mar 2023) +------------------- + +- New user functions :func:`glass.user.save_cls` and + :func:`glass.user.load_cls` to save and load angular power spectra in the + *GLASS* format. + +- Some type hints were added to library functions. These are mostly + perfunctory at this time, but there is interest in adding proper typing + support in the future, including use of the Array API. + +- The ``glass.matter`` module was removed in favour of the more + appropriately-named :mod:`glass.shells` module for shell definitions. + +- Instead of using an array of shell boundaries and separate ``MatterWeights``, + shells are now entirely defined by a :class:`glass.shells.RadialWindow` + window function. + +- Many functions have an improved interface thanks to the previous point: + + - The ``glass.math.restrict_interval`` function has been replaced by + :func:`glass.shells.restrict`, as shells are now defined by + window functions instead of sharp intervals. 
+ + - The :func:`glass.points.effective_bias` function now takes a window + function as input and computes its effective bias parameter. + + - The ``glass.galaxies.constant_densities`` and ``density_from_dndz`` + functions have been removed, since densities can now easily be partitioned + by window functions using :func:`glass.shells.restrict` and + :func:`glass.shells.partition`. + + - The ``zmin`` and ``zmax`` parameters of `glass.galaxies.redshifts_from_nz` + have been removed for the same reason. + + - The ``glass.lensing.multi_plane_weights`` function, which computed all + lensing weights at once, is replaced by the ``add_window`` method of + :class:`glass.lensing.MultiPlaneConvergence`, which adds a convergence + plane given by a :class:`~glass.shells.RadialWindow` at its effective + redshift. + + - The :func:`glass.lensing.multi_plane_matrix` function now takes a sequence + of :class:`~glass.shells.RadialWindow`. It no longer returns the list of + source redshifts, since these are now independently available as the + effective redshifts of the windows. + +- The arguments of the :class:`~glass.lensing.MultiPlaneConvergence` method + ``add_plane`` have been renamed to ``zsrc`` and ``wlens`` from the more + ambiguous ``z`` and ``w`` (which could be confused with "window"). The + properties ``z`` and ``w`` that returned these values have been similarly + changed. + + +2023.1 (31 Jan 2023) +-------------------- - **Initial wide release for GLASS paper** diff --git a/docs/user/definitions.rst b/docs/user/definitions.rst index e365233a..3a53380b 100644 --- a/docs/user/definitions.rst +++ b/docs/user/definitions.rst @@ -6,6 +6,16 @@ The *GLASS* code uses the following mathematical definitions. .. glossary:: + deflection + The deflection :math:`\alpha` is a complex value with spin weight + :math:`1`. It describes the displacement of a position along a geodesic + (i.e. great circle). The angular distance of the displacement is the + absolute value :math:`|\alpha|`. 
The direction of the displacement is + the angle given by the complex argument :math:`\arg\alpha`, such that + :math:`\arg\alpha = 0^\circ` is north, :math:`\arg\alpha = 90^\circ` is + east, :math:`\arg\alpha = 180^\circ` is south, and :math:`\arg\alpha = + -90^\circ` is west. + ellipticity If :math:`q = b/a` is the axis ratio of an elliptical isophote with semi-major axis :math:`a` and semi-minor axis :math:`b`, and :math:`\phi` diff --git a/docs/user/how-glass-works.rst b/docs/user/how-glass-works.rst index 8fbfc003..bd94ba95 100644 --- a/docs/user/how-glass-works.rst +++ b/docs/user/how-glass-works.rst @@ -44,7 +44,6 @@ which are flat and non-overlapping. plt.fill_between(za, np.zeros_like(wa), wa, alpha=0.5) plt.annotate(f'shell {i+1}', (zeff, 0.5), ha='center', va='center') - plt.ylim(0., 2.) plt.xlabel('redshift $z$') plt.ylabel('window function $W(z)$') plt.tight_layout() @@ -63,6 +62,48 @@ then simulates the (radially) continuous field :math:`F(z)` as the (radially) discretised fields :math:`F_i`. +.. _user-window-functions: + +Window functions +^^^^^^^^^^^^^^^^ + +*GLASS* supports arbitrary window functions (although the computation of +:ref:`line-of-sight integrals ` makes some assumptions). +The following :ref:`window functions ` are +included: + +.. plot:: + + from glass.shells import (redshift_grid, tophat_windows, linear_windows, + cubic_windows) + + plot_windows = [tophat_windows, linear_windows, + cubic_windows] + nr = (len(plot_windows)+1)//2 + + fig, axes = plt.subplots(nr, 2, figsize=(8, nr*3), layout="constrained", + squeeze=False, sharex=False, sharey=True) + + zs = redshift_grid(0., 0.5, dz=0.1) + zt = np.linspace(0., 0.5, 200) + + for ax in axes.flat: + ax.axis(False) + for windows, ax in zip(plot_windows, axes.flat): + ws = windows(zs) + wt = np.zeros_like(zt) + ax.axis(True) + ax.set_title(windows.__name__) + for i, (za, wa, zeff) in enumerate(ws): + wt += np.interp(zt, za, wa, left=0., right=0.) 
+ ax.fill_between(za, np.zeros_like(wa), wa, alpha=0.5) + ax.plot(zt, wt, c="k", lw=2) + for ax in axes.flat: + ax.set_xlabel("redshift $z$") + for ax in axes[:, 0]: + ax.set_ylabel("window function $W(z)$") + + Angular discretisation ---------------------- @@ -89,6 +130,8 @@ difference between the two is whether or not the pixel window function is applied to the spherical harmonic expansion of the fields. +.. _user-los-integrals: + Line-of-sight integrals ----------------------- diff --git a/docs/user/publications.rst b/docs/user/publications.rst index 84d499b7..01f428f1 100644 --- a/docs/user/publications.rst +++ b/docs/user/publications.rst @@ -4,7 +4,7 @@ Publications The following publications about *GLASS* exist. * | *GLASS: Generator for Large Scale Structure* - | Tessore N., Loureiro A., Joachimi B., von Wietersheim-Kramsta M. - | arXiv e-prints, 2023 + | Tessore N., Loureiro A., Joachimi B., von Wietersheim-Kramsta M., Jeffrey N. + | The Open Journal of Astrophysics 6, 11 (2023) (`arXiv `_, - `ADS `_) + `ADS `_) diff --git a/glass/__init__.py b/glass/__init__.py new file mode 100644 index 00000000..9a9e4c1b --- /dev/null +++ b/glass/__init__.py @@ -0,0 +1,4 @@ +try: + from ._version import __version__, __version_tuple__ # noqa: F401 +except ModuleNotFoundError: + pass diff --git a/glass/all.py b/glass/all.py deleted file mode 100644 index 17aae9f3..00000000 --- a/glass/all.py +++ /dev/null @@ -1,6 +0,0 @@ -# author: Nicolas Tessore -# license: MIT -'''meta-module that imports all modules from the GLASS namespace''' - - -(lambda: [__import__(_.name) for _ in __import__('pkgutil').iter_modules(__import__(__package__).__path__, __package__ + '.')])() # noqa diff --git a/glass/core/__init__.py b/glass/core/__init__.py new file mode 100644 index 00000000..7e3ee958 --- /dev/null +++ b/glass/core/__init__.py @@ -0,0 +1,12 @@ +# author: Nicolas Tessore +# license: MIT +''' +Core functions (:mod:`glass.core`) +================================== + +.. 
currentmodule:: glass.core + +The :mod:`glass.core` module contains core functionality for developing +GLASS modules. + +''' diff --git a/glass/core/algorithm.py b/glass/core/algorithm.py new file mode 100644 index 00000000..c5fb5a32 --- /dev/null +++ b/glass/core/algorithm.py @@ -0,0 +1,70 @@ +# author: Nicolas Tessore +# license: MIT +'''core module for algorithms''' + +from __future__ import annotations + +import numpy as np +from numpy.typing import ArrayLike + + +def nnls( + a: ArrayLike, + b: ArrayLike, + *, + tol: float = 0.0, + maxiter: int | None = None, +) -> ArrayLike: + """Compute a non-negative least squares solution. + + Implementation of the algorithm due to [1]_ as described in [2]_. + + References + ---------- + .. [1] Lawson, C. L. and Hanson, R. J. (1995), Solving Least Squares + Problems. doi: 10.1137/1.9781611971217 + .. [2] Bro, R. and De Jong, S. (1997), A fast + non-negativity-constrained least squares algorithm. J. + Chemometrics, 11, 393-401. + + """ + + a = np.asanyarray(a) + b = np.asanyarray(b) + + if a.ndim != 2: + raise ValueError("input `a` is not a matrix") + if b.ndim != 1: + raise ValueError("input `b` is not a vector") + if a.shape[0] != b.shape[0]: + raise ValueError("the shapes of `a` and `b` do not match") + + _, n = a.shape + + if maxiter is None: + maxiter = 3 * n + + index = np.arange(n) + p = np.full(n, False) + x = np.zeros(n) + for i in range(maxiter): + if np.all(p): + break + w = np.dot(b - a @ x, a) + m = index[~p][np.argmax(w[~p])] + if w[m] <= tol: + break + p[m] = True + while True: + ap = a[:, p] + xp = x[p] + sp = np.linalg.solve(ap.T @ ap, b @ ap) + t = (sp <= 0) + if not np.any(t): + break + alpha = -np.min(xp[t]/(xp[t] - sp[t])) + x[p] += alpha * (sp - xp) + p[x <= 0] = False + x[p] = sp + x[~p] = 0 + return x diff --git a/glass/core/array.py b/glass/core/array.py new file mode 100644 index 00000000..09784e21 --- /dev/null +++ b/glass/core/array.py @@ -0,0 +1,80 @@ +# author: Nicolas Tessore +# license: MIT 
# Smallest value of pi used throughout; kept as an explicit literal so
# the constants below do not depend on numpy at import time.
PI = 3.1415926535897932384626433832795028841971693993751

# solid angle of the full sphere in square degrees / arcmin / arcsec
DEGREE2_SPHERE = 60**4//100/PI
ARCMIN2_SPHERE = 60**6//100/PI
ARCSEC2_SPHERE = 60**8//100/PI


def broadcast_first(*arrays):
    """Broadcast arrays, treating the first axis as common.

    Each input's leading axis is rotated to the back, the arrays are
    broadcast against one another, and the axis is rotated to the front
    again.  Zero-dimensional inputs enter broadcasting unchanged.
    """
    rotated = tuple(np.moveaxis(arr, 0, -1) if np.ndim(arr) else arr
                    for arr in arrays)
    rotated = np.broadcast_arrays(*rotated)
    return tuple(np.moveaxis(arr, -1, 0) if np.ndim(arr) else arr
                 for arr in rotated)


def broadcast_leading_axes(*args):
    '''Broadcast all but the last N axes.

    Returns the shape of the broadcast dimensions, and all input arrays
    with leading axes matching that shape.

    Example
    -------
    Broadcast all dimensions of ``a``, all except the last dimension of
    ``b``, and all except the last two dimensions of ``c``.

    >>> a = 0
    >>> b = np.zeros((4, 10))
    >>> c = np.zeros((3, 1, 5, 6))
    >>> dims, a, b, c = broadcast_leading_axes((a, 0), (b, 1), (c, 2))
    >>> dims
    (3, 4)
    >>> a.shape
    (3, 4)
    >>> b.shape
    (3, 4, 10)
    >>> c.shape
    (3, 4, 5, 6)

    '''

    leading = []
    trailing = []
    for arr, ntrail in args:
        shape = np.shape(arr)
        split = len(shape) - ntrail
        leading.append(shape[:split])
        trailing.append(shape[split:])
    dims = np.broadcast_shapes(*leading)
    expanded = tuple(np.broadcast_to(arr, dims + tail)
                     for (arr, _), tail in zip(args, trailing))
    return (dims, *expanded)


def ndinterp(x, xp, fp, axis=-1, left=None, right=None, period=None):
    '''Interpolate a multi-dimensional array over one axis.'''
    def _interp1d(col):
        # one-dimensional interpolation of a single slice along `axis`
        return np.interp(x, xp, col, left=left, right=right, period=period)
    return np.apply_along_axis(_interp1d, axis, fp)


def trapz_product(f, *ff, axis=-1):
    '''Trapezoidal rule for a product of functions.

    The abscissas of all functions are merged over their common support
    and the product of the interpolated values is integrated.
    '''
    grid, _ = f
    for pts, _ in ff:
        inside = grid[(grid >= pts[0]) & (grid <= pts[-1])]
        overlap = pts[(pts >= grid[0]) & (pts <= grid[-1])]
        grid = np.union1d(inside, overlap)
    values = np.interp(grid, *f)
    for fn in ff:
        values *= np.interp(grid, *fn)
    return np.trapz(values, grid, axis=axis)


def cumtrapz(f, x, dtype=None, out=None):
    '''Cumulative trapezoidal rule along the last axis.

    The first entry of the result is always 0.  If ``out`` is not
    given, a new array with dtype ``dtype`` (default: same as ``f``)
    is allocated; note that an integer output dtype truncates the
    accumulated panel areas.
    '''
    if out is None:
        out = np.empty_like(f, dtype=dtype)

    # panel area = mean of adjacent samples times step width; running
    # sums are written straight into out[..., 1:]
    panels = (f[..., 1:] + f[..., :-1])/2*np.diff(x)
    np.cumsum(panels, axis=-1, out=out[..., 1:])
    out[..., 0] = 0
    return out
def getcl(cls, i, j, lmax=None):
    """Return a specific angular power spectrum from an array.

    Return the angular power spectrum for indices *i* and *j* from an
    array in *GLASS* ordering.

    Parameters
    ----------
    cls : list of array_like
        List of angular power spectra in *GLASS* ordering.
    i, j : int
        Combination of indices to return.  The order of *i* and *j*
        does not matter.
    lmax : int, optional
        Truncate the returned spectrum at this mode number.  Spectra
        shorter than ``lmax + 1`` are zero-padded to that length.

    Returns
    -------
    cl : array_like
        The angular power spectrum for indices *i* and *j*.

    """
    if j > i:
        i, j = j, i
    # GLASS ordering stores C(i, j) with i >= j at triangle index
    # i*(i+1)/2 + (i-j)
    cl = cls[i*(i+1)//2+i-j]
    if lmax is not None:
        if len(cl) > lmax + 1:
            cl = cl[:lmax+1]
        else:
            cl = np.pad(cl, (0, lmax + 1 - len(cl)))
    return cl


def effective_cls(cls, weights1, weights2=None, *, lmax=None):
    """Compute effective angular power spectra from weights.

    Computes a linear combination of the angular power spectra *cls*
    using the factors provided by *weights1* and *weights2*.  Additional
    axes in *weights1* and *weights2* produce arrays of spectra.

    Parameters
    ----------
    cls : (N,) list of array_like
        Angular matter power spectra to combine, in *GLASS* ordering.
    weights1 : (N, \\*M1) array_like
        Weight factors for spectra.  The first axis must be equal to
        the number of fields.
    weights2 : (N, \\*M2) array_like, optional
        Second set of weights.  If not given, *weights1* is used.
    lmax : int, optional
        Truncate the angular power spectra at this mode number.  If not
        given, the longest input in *cls* will be used.

    Returns
    -------
    cls : (\\*M1, \\*M2, LMAX+1) array_like
        Array of effective angular power spectra.  The leading axes
        correspond to the extra axes of *weights1* and *weights2*; the
        last axis is the angular mode number.
        (Doc fix: an earlier version of this docstring described the
        return value as a dictionary, which the code never produces.)

    """
    from itertools import combinations_with_replacement, product

    # number of fields from the triangle number of spectra
    n = int((2*len(cls))**0.5)
    if n*(n+1)//2 != len(cls):
        raise ValueError("length of cls is not a triangle number")

    # find lmax if not given
    if lmax is None:
        lmax = max(map(len, cls), default=0) - 1

    # broadcast weights1 such that its shape starts with n
    weights1 = np.asanyarray(weights1)
    weights2 = np.asanyarray(weights2) if weights2 is not None else weights1

    shape1, shape2 = weights1.shape, weights2.shape
    for i, shape in enumerate((shape1, shape2)):
        if not shape or shape[0] != n:
            raise ValueError(f"shape mismatch between fields and weights{i+1}")

    # get the iterator over leading weight axes
    # auto-spectra do not repeat identical computations
    if weights2 is weights1:
        pairs = combinations_with_replacement(np.ndindex(shape1[1:]), 2)
    else:
        pairs = product(np.ndindex(shape1[1:]), np.ndindex(shape2[1:]))

    # create the output array: axes for all input axes plus lmax+1
    out = np.empty(shape1[1:] + shape2[1:] + (lmax+1,))

    # helper that will grab the entire first column (i.e. shells)
    c = (slice(None),)

    # compute all combined cls from pairs
    # if weights2 is weights1, set the transpose elements in one pass
    for j1, j2 in pairs:
        w1, w2 = weights1[c + j1], weights2[c + j2]
        cl = sum(
            w1[i1] * w2[i2] * getcl(cls, i1, i2, lmax=lmax)
            for i1, i2 in np.ndindex(n, n)
        )
        out[j1 + j2] = cl
        if weights2 is weights1 and j1 != j2:
            out[j2 + j1] = cl
    return out
def redshifts(n: int | ArrayLike, w: RadialWindow, *,
              rng: np.random.Generator | None = None
              ) -> np.ndarray:
    '''Sample redshifts from a radial window function.

    This function samples *n* redshifts from a distribution that
    follows the given radial window function *w*.

    Parameters
    ----------
    n : int or array_like
        Number of redshifts to sample.  If an array is given, the
        results are concatenated.
    w : :class:`~glass.shells.RadialWindow`
        Radial window function.
    rng : :class:`~numpy.random.Generator`, optional
        Random number generator.  If not given, a default RNG is used.

    Returns
    -------
    z : array_like
        Random redshifts following the radial window function.

    '''
    return redshifts_from_nz(n, w.za, w.wa, rng=rng)


def redshifts_from_nz(count: int | ArrayLike, z: ArrayLike, nz: ArrayLike, *,
                      rng: np.random.Generator | None = None
                      ) -> np.ndarray:
    '''Generate galaxy redshifts from a source distribution.

    The function supports sampling from multiple populations of
    redshifts if *count* is an array or if there are additional axes in
    the *z* or *nz* arrays.  In this case, the shape of *count* and the
    leading dimensions of *z* and *nz* are broadcast to a common shape,
    and redshifts are sampled independently for each extra dimension.
    The results are concatenated into a flat array.

    Parameters
    ----------
    count : int or array_like
        Number of redshifts to sample.  If an array is given, its shape
        is broadcast against the leading axes of *z* and *nz*.
    z, nz : array_like
        Source distribution.  Leading axes are broadcast against the
        shape of *count*.
    rng : :class:`~numpy.random.Generator`, optional
        Random number generator.  If not given, a default RNG is used.

    Returns
    -------
    redshifts : array_like
        Redshifts sampled from the given source distribution.  For
        inputs with extra dimensions, returns a flattened 1-D array of
        samples from all populations.

    '''

    # get default RNG if not given
    if rng is None:
        rng = np.random.default_rng()

    # bring inputs' leading axes into common shape
    dims, count, z, nz = broadcast_leading_axes((count, 0), (z, 1), (nz, 1))

    # flat output array for all populations
    redshifts = np.empty(count.sum())

    # keep track of the number of sampled redshifts
    total = 0

    # go through extra dimensions; also works if dims is empty
    for k in np.ndindex(dims):

        # compute the CDF of each galaxy population via the trapezoidal
        # rule, then normalise so inverse-transform sampling applies
        cdf = cumtrapz(nz[k], z[k], dtype=float)
        cdf /= cdf[-1]

        # sample redshifts and store result
        redshifts[total:total+count[k]] = np.interp(
            rng.uniform(0, 1, size=count[k]), cdf, z[k])
        total += count[k]

    assert total == redshifts.size

    return redshifts


def gaussian_phz(z: ArrayLike, sigma_0: float | ArrayLike, *,
                 lower: ArrayLike | None = None,
                 upper: ArrayLike | None = None,
                 rng: np.random.Generator | None = None) -> np.ndarray:
    r'''Photometric redshifts assuming a Gaussian error.

    A simple toy model of photometric redshift errors that assumes a
    Gaussian error with redshift-dependent standard deviation
    :math:`\sigma(z) = (1 + z) \sigma_0` [1]_.

    Parameters
    ----------
    z : array_like
        True redshifts.
    sigma_0 : float or array_like
        Redshift error in the tomographic binning at zero redshift.
    lower, upper : float or array_like, optional
        Bounds for the returned photometric redshifts.  Defaults to
        0 and infinity, respectively.
    rng : :class:`~numpy.random.Generator`, optional
        Random number generator.  If not given, a default RNG is used.

    Returns
    -------
    phz : array_like
        Photometric redshifts assuming Gaussian errors, of the same
        shape as *z*.

    Warnings
    --------
    The *lower* and *upper* bounds are implemented using plain
    rejection sampling from the non-truncated normal distribution.  If
    bounds are used, they should always contain significant probability
    mass.  In particular, if ``sigma_0`` is zero for an entry whose
    true redshift lies outside the bounds, the sampling cannot
    terminate.

    References
    ----------
    .. [1] Amara A., Réfrégier A., 2007, MNRAS, 381, 1018.
           doi:10.1111/j.1365-2966.2007.12271.x

    '''

    # get default RNG if not given
    if rng is None:
        rng = np.random.default_rng()

    sigma = np.add(1, z)*sigma_0
    dims = np.shape(sigma)

    zphot = rng.normal(z, sigma)

    if lower is None:
        lower = 0.
    if upper is None:
        upper = np.inf

    if not np.all(np.less(lower, upper)):
        raise ValueError("requires lower < upper")

    if not dims:
        # scalar input: simple rejection loop
        while zphot < lower or zphot > upper:
            zphot = rng.normal(z, sigma)
    else:
        # BUG FIX: the previous code used np.where(mask)[0], which kept
        # only the first-axis indices and broke for inputs with more
        # than one dimension; it also mixed truncated index arrays with
        # full-size bound arrays.  Work on flat views/copies instead,
        # which is identical for the 1-D case (same RNG stream).
        z = np.broadcast_to(z, dims).ravel()
        sigma = np.broadcast_to(sigma, dims).ravel()
        lo = np.broadcast_to(lower, dims).ravel()
        hi = np.broadcast_to(upper, dims).ravel()
        # flat view into the output; rng.normal returns a contiguous
        # array, so writes propagate back to zphot
        zflat = zphot.reshape(-1)
        trunc = np.flatnonzero((zflat < lo) | (zflat > hi))
        while trunc.size:
            znew = rng.normal(z[trunc], sigma[trunc])
            zflat[trunc] = znew
            trunc = trunc[(znew < lo[trunc]) | (znew > hi[trunc])]

    return zphot
def from_convergence(kappa: NDArray, lmax: Optional[int] = None, *,
                     potential: bool = False,
                     deflection: bool = False,
                     shear: bool = False,
                     discretized: bool = True,
                     ) -> Tuple[NDArray, ...]:
    r'''Compute other weak lensing maps from the convergence.

    Takes a HEALPix convergence map and returns any combination of the
    deflection potential, deflection, and shear maps, computed via
    spherical harmonic transforms.

    Parameters
    ----------
    kappa : array_like
        HEALPix map of the convergence field.
    lmax : int, optional
        Maximum angular mode number to use in the transform.  Defaults
        to ``3*nside - 1``.
    potential, deflection, shear : bool, optional
        Which lensing maps to return.
    discretized : bool, optional
        Correct the shear for the HEALPix pixel window functions.

    Returns
    -------
    psi : array_like
        Map of the deflection potential.  Only returned if
        ``potential`` is true.
    alpha : array_like
        Map of the deflection (complex).  Only returned if
        ``deflection`` is true.
    gamma : array_like
        Map of the shear (complex).  Only returned if ``shear`` is
        true.

    Notes
    -----
    The harmonic-space relations applied in sequence are [1]_

    .. math::

        2 \kappa_{lm} = -l \, (l+1) \, \psi_{lm} \;, \qquad
        \alpha_{lm} = \sqrt{l \, (l+1)} \, \psi_{lm} \;, \qquad
        2 \gamma_{lm} = \sqrt{(l+2)(l+1)l(l-1)} \, \psi_{lm} \;,

    where the deflection has spin weight 1 and the shear spin weight 2
    in the HEALPix convention.

    References
    ----------
    .. [1] Tessore N., et al., OJAp, 6, 11 (2023).
           doi:10.21105/astro.2302.01942

    '''

    # nothing requested means nothing computed
    if not (potential or deflection or shear):
        return ()

    # resolution and band limit of the input map
    nside = hp.get_nside(kappa)
    if lmax is None:
        lmax = 3*nside - 1

    # harmonic coefficients of the convergence
    alm = hp.map2alm(kappa, lmax=lmax, pol=False, use_pixel_weights=True)

    # mode numbers; every conversion below is a function of these
    ell = np.arange(lmax+1)

    # requested maps are collected here, in fixed order
    maps = ()

    # convergence -> potential: divide by -l(l+1)/2, monopole set to 0
    fl = np.divide(-2, ell*(ell+1), where=(ell > 0), out=np.zeros(lmax+1))
    hp.almxfl(alm, fl, inplace=True)

    if potential:
        maps += (hp.alm2map(alm, nside, lmax=lmax),)

    # stop early if no spin-weighted maps are wanted
    if not (deflection or shear):
        return maps

    # spin transforms require an (all-zero) B-mode component
    blm = np.zeros_like(alm)

    # potential -> deflection: multiply by sqrt(l(l+1))
    # TODO: missing spin-1 pixel window function here
    hp.almxfl(alm, np.sqrt(ell*(ell+1)), inplace=True)

    if deflection:
        alpha_qu = hp.alm2map_spin([alm, blm], nside, 1, lmax)
        maps += (alpha_qu[0] + 1j*alpha_qu[1],)

    if not shear:
        return maps

    # deflection -> shear: multiply by sqrt((l-1)(l+2))/2; when the
    # fields are discretised, swap the spin-0 pixel window for spin-2
    fl = np.sqrt((ell-1)*(ell+2), where=(ell > 0), out=np.zeros(lmax+1))
    fl /= 2
    if discretized:
        pw0, pw2 = hp.pixwin(nside, lmax=lmax, pol=True)
        fl *= pw2/pw0
    hp.almxfl(alm, fl, inplace=True)

    gamma_qu = hp.alm2map_spin([alm, blm], nside, 2, lmax)
    maps += (gamma_qu[0] + 1j*gamma_qu[1],)

    return maps


def multi_plane_matrix(
    shells: Sequence["RadialWindow"],
    cosmo: "Cosmology",
) -> ArrayLike:
    """Compute the matrix of lensing contributions from each shell."""
    convergence = MultiPlaneConvergence(cosmo)
    mat = np.eye(len(shells))
    for i, shell in enumerate(shells):
        # feed a unit weight for shell i and record the resulting
        # convergence contributions in row i
        convergence.add_window(mat[i].copy(), shell)
        mat[i, :] = convergence.kappa
    return mat


def multi_plane_weights(
    weights: ArrayLike,
    shells: Sequence["RadialWindow"],
    cosmo: "Cosmology",
) -> ArrayLike:
    """Compute effective weights for multi-plane convergence.

    Converts an array *weights* of relative weights for each shell into
    the equivalent array of relative lensing weights.  This is the
    discretised version of the integral that turns a redshift
    distribution :math:`n(z)` into the lensing efficiency sometimes
    denoted :math:`g(z)` or :math:`q(z)`.

    Parameters
    ----------
    weights : array_like
        Relative weight of each shell.  The first axis must broadcast
        against the number of shells, and is normalised internally.
    shells : list of :class:`~glass.shells.RadialWindow`
        Window functions of the shells.
    cosmo : Cosmology
        Cosmology instance.

    Returns
    -------
    lensing_weights : array_like
        Relative lensing weight of each shell.

    """
    # the first axis of the weights must index the shells
    shape = np.shape(weights)
    if not shape or shape[0] != len(shells):
        raise ValueError("shape mismatch between weights and shells")
    # normalise over shells, then fold in the lensing contributions
    weights = weights / np.sum(weights, axis=0)
    mat = multi_plane_matrix(shells, cosmo)
    return np.matmul(mat.T, weights)


def deflect(lon: ArrayLike, lat: ArrayLike, alpha: ArrayLike) -> NDArray:
    r"""Apply deflections to positions.

    Takes an array of deflection values and applies them to the given
    positions.

    Parameters
    ----------
    lon, lat : array_like
        Longitudes and latitudes to be deflected, in degrees.
    alpha : array_like
        Deflection values.  Must be complex-valued or have a leading
        axis of size 2 for the real and imaginary component.

    Returns
    -------
    lon, lat : array_like
        Longitudes and latitudes after deflection, in degrees.

    Notes
    -----
    The complex deflection :math:`\alpha` transports a point on the
    sphere an angular distance :math:`|\alpha|` along the geodesic with
    bearing :math:`\arg\alpha` in the original point.  In the language
    of differential geometry, this function is the exponential map.

    """

    alpha = np.asanyarray(alpha)
    if np.iscomplexobj(alpha):
        alpha_re, alpha_im = alpha.real, alpha.imag
    else:
        alpha_re, alpha_im = alpha

    # great-circle navigation in latitude (not colatitude), hence the
    # swapped sin/cos below:
    # θ' = arctan2(cosθ cos|α| + sinθ sin|α| cosγ,
    #              √[(cosθ sin|α| - sinθ cos|α| cosγ)² + (sinθ sinγ)²])
    # δ  = arctan2(sin|α| sinγ, sinθ cos|α| - cosθ sin|α| cosγ)

    t = np.radians(lat)
    cos_t, sin_t = np.sin(t), np.cos(t)  # sin/cos flipped: lat not co-lat

    dist = np.hypot(alpha_re, alpha_im)      # |alpha|
    bearing = np.arctan2(alpha_im, alpha_re)  # arg(alpha)
    cos_d, sin_d = np.cos(dist), np.sin(dist)
    cos_b, sin_b = np.cos(bearing), np.sin(bearing)

    # flipped atan2 arguments for lat instead of co-lat
    new_t = np.arctan2(cos_t*cos_d + sin_t*sin_d*cos_b,
                       np.hypot(cos_t*sin_d - sin_t*cos_d*cos_b,
                                sin_t*sin_b))

    delta = np.arctan2(sin_d*sin_b, sin_t*cos_d - cos_t*sin_d*cos_b)

    return lon - np.degrees(delta), np.degrees(new_t)
a/glass/observations.py b/glass/observations.py index f9259f31..44883e02 100644 --- a/glass/observations.py +++ b/glass/observations.py @@ -35,7 +35,7 @@ from typing import Optional, Tuple, List from numpy.typing import ArrayLike -from .math import cumtrapz +from .core.array import cumtrapz def vmap_galactic_ecliptic(nside: int, galactic: Tuple[float, float] = (30, 90), diff --git a/glass/points.py b/glass/points.py index e7f4b650..c2b4ddae 100644 --- a/glass/points.py +++ b/glass/points.py @@ -14,6 +14,7 @@ .. autofunction:: positions_from_delta .. autofunction:: uniform_positions +.. autofunction:: position_weights Bias @@ -33,7 +34,8 @@ import numpy as np import healpix -from .math import ARCMIN2_SPHERE, trapz_product +from .core.array import broadcast_first, broadcast_leading_axes, trapz_product +from .core.constants import ARCMIN2_SPHERE def effective_bias(z, bz, w): @@ -84,44 +86,62 @@ def loglinear_bias(delta, b): def positions_from_delta(ngal, delta, bias=None, vis=None, *, - bias_model='linear', remove_monopole=False, rng=None): + bias_model='linear', remove_monopole=False, + batch=1_000_000, rng=None): '''Generate positions tracing a density contrast. - The map of expected number counts is constructed from the number density, - density contrast, an optional bias model, and an optional visibility map. + The map of expected number counts is constructed from the number + density, density contrast, an optional bias model, and an optional + visibility map. - If ``remove_monopole`` is set, the monopole of the computed density contrast - is removed. Over the full sky, the mean number density of the map will then - match the given number density exactly. This, however, means that an - effectively different bias model is being used, unless the monopole is - already zero in the first place. + If ``remove_monopole`` is set, the monopole of the computed density + contrast is removed. 
Over the full sky, the mean number density of + the map will then match the given number density exactly. This, + however, means that an effectively different bias model is being + used, unless the monopole is already zero in the first place. + + The function supports multi-dimensional input for the ``ngal``, + ``delta``, ``bias``, and ``vis`` parameters. Extra dimensions are + broadcast to a common shape, and treated as separate populations of + points. These are then sampled independently, and the results + concatenated into a flat list of longitudes and latitudes. The + number of points per population is returned in ``count`` as an array + in the shape of the extra dimensions. Parameters ---------- - ngal : float + ngal : float or array_like Number density, expected number of points per arcmin2. delta : array_like - Map of the input density contrast. This is fed into the bias model to - produce the density contrast for sampling. - bias : float, optional + Map of the input density contrast. This is fed into the bias + model to produce the density contrast for sampling. + bias : float or array_like, optional Bias parameter, is passed as an argument to the bias model. vis : array_like, optional - Visibility map for the observed points. This is multiplied with the - full sky number count map, and must hence be of compatible shape. + Visibility map for the observed points. This is multiplied with + the full sky number count map, and must hence be of compatible + shape. bias_model : str or callable, optional - The bias model to apply. If a string, refers to a function in the - points module, e.g. ``'linear'`` for ``glass.points.linear_bias`` or - ``'loglinear'`` for ``glass.points.loglinear_bias``. + The bias model to apply. If a string, refers to a function in + the :mod:`~glass.points` module, e.g. ``'linear'`` for + :func:`linear_bias()` or ``'loglinear'`` for + :func:`loglinear_bias`. 
remove_monopole : bool, optional - If true, the monopole of the density contrast after biasing is fixed to - zero. + If true, the monopole of the density contrast after biasing is + fixed to zero. + batch : int, optional + Maximum number of positions to yield in one batch. rng : :class:`~numpy.random.Generator`, optional - Random number generator. If not given, a default RNG will be used. + Random number generator. If not given, a default RNG is used. - Returns - ------- + Yields + ------ lon, lat : array_like Columns of longitudes and latitudes for the sampled points. + count : int or array_like + The number of sampled points. If multiple populations are + sampled, an array of counts in the shape of the extra + dimensions is returned. ''' @@ -135,71 +155,112 @@ def positions_from_delta(ngal, delta, bias=None, vis=None, *, elif not callable(bias_model): raise ValueError('bias_model must be string or callable') - # compute density contrast from bias model, or copy - if bias is None: - n = np.copy(delta) - else: - n = bias_model(delta, bias) - - # remove monopole if asked to - if remove_monopole: - n -= np.mean(n, keepdims=True) - - # turn into number count, modifying the array in place - n += 1 - n *= ARCMIN2_SPHERE/n.size*ngal - - # apply visibility if given + # broadcast inputs to common shape of extra dimensions + inputs = [(ngal, 0), (delta, 1)] + if bias is not None: + inputs += [(bias, 0)] if vis is not None: - n *= vis - - # clip number density at zero - np.clip(n, 0, None, out=n) - - # sample actual number in each pixel - n = rng.poisson(n) - - # total number of sampled points - ntot = n.sum() - - # for converting randomly sampled positions to HEALPix indices - npix = n.shape[-1] - nside = healpix.npix2nside(npix) - - # these will hold the results - lon = np.empty(ntot) - lat = np.empty(ntot) - - # sample batches of 10000 pixels - batch = 10_000 - ncur = 0 - for i in range(0, npix, batch): - k = n[i:i+batch] - bpix = np.repeat(np.arange(i, i+k.size), k) - 
blon, blat = healpix.randang(nside, bpix, lonlat=True, rng=rng) - lon[ncur:ncur+blon.size] = blon - lat[ncur:ncur+blat.size] = blat - ncur += bpix.size - - assert ncur == ntot, 'internal error in sampling' - - return lon, lat + inputs += [(vis, 1)] + dims, ngal, delta, *rest = broadcast_leading_axes(*inputs) + if bias is not None: + bias, *rest = rest + if vis is not None: + vis, *rest = rest + + # iterate the leading dimensions + for k in np.ndindex(dims): + + # compute density contrast from bias model, or copy + if bias is None: + n = np.copy(delta[k]) + else: + n = bias_model(delta[k], bias[k]) + + # remove monopole if asked to + if remove_monopole: + n -= np.mean(n, keepdims=True) + + # turn into number count, modifying the array in place + n += 1 + n *= ARCMIN2_SPHERE/n.size*ngal[k] + + # apply visibility if given + if vis is not None: + n *= vis[k] + + # clip number density at zero + np.clip(n, 0, None, out=n) + + # sample actual number in each pixel + n = rng.poisson(n) + + # total number of points + count = n.sum() + # don't go through pixels if there are no points + if count == 0: + continue + + # for converting randomly sampled positions to HEALPix indices + npix = n.shape[-1] + nside = healpix.npix2nside(npix) + + # create a mask to report the count in the right axis + if dims: + cmask = np.zeros(dims, dtype=int) + cmask[k] = 1 + else: + cmask = 1 + + # sample the map in batches + step = 1000 + start, stop, size = 0, 0, 0 + while count: + # tally this group of pixels + q = np.cumsum(n[stop:stop+step]) + # does this group of pixels fill the batch? + if size + q[-1] < min(batch, count): + # no, we need the next group of pixels to fill the batch + stop += step + size += q[-1] + else: + # how many pixels from this group do we need? 
+ stop += np.searchsorted(q, batch - size, side='right') + # if the first pixel alone is too much, use it anyway + if stop == start: + stop += 1 + # sample this batch of pixels + ipix = np.repeat(np.arange(start, stop), n[start:stop]) + lon, lat = healpix.randang(nside, ipix, lonlat=True, rng=rng) + # next batch + start, size = stop, 0 + # keep track of remaining number of points + count -= ipix.size + # yield the batch + yield lon, lat, ipix.size*cmask + + # make sure that the correct number of pixels was sampled + assert np.sum(n[stop:]) == 0 def uniform_positions(ngal, *, rng=None): '''Generate positions uniformly over the sphere. + The function supports array input for the ``ngal`` parameter. + Parameters ---------- - ngal : float + ngal : float or array_like Number density, expected number of positions per arcmin2. rng : :class:`~numpy.random.Generator`, optional Random number generator. If not given, a default RNG will be used. - Returns - ------- - lon, lat : array_like + Yields + ------ + lon, lat : array_like or list of array_like Columns of longitudes and latitudes for the sampled points. + count : int or list of ints + The number of sampled points. For array inputs, an array of + counts with the same shape is returned. 
''' @@ -208,10 +269,63 @@ def uniform_positions(ngal, *, rng=None): rng = np.random.default_rng() # sample number of galaxies - ntot = rng.poisson(ARCMIN2_SPHERE*ngal) + ngal = rng.poisson(np.multiply(ARCMIN2_SPHERE, ngal)) + + # extra dimensions of the output + dims = np.shape(ngal) + + # make sure ntot is an array even if scalar + ngal = np.broadcast_to(ngal, dims) + + # sample each set of points + for k in np.ndindex(dims): + + # sample uniformly over the sphere + lon = rng.uniform(-180, 180, size=ngal[k]) + lat = np.rad2deg(np.arcsin(rng.uniform(-1, 1, size=ngal[k]))) + + # report count + if dims: + count = np.zeros(dims, dtype=int) + count[k] = ngal[k] + else: + count = int(ngal[k]) + + yield lon, lat, count - # sample uniformly over the sphere - lon = rng.uniform(-180, 180, size=ntot) - lat = np.rad2deg(np.arcsin(rng.uniform(-1, 1, size=ntot))) - return lon, lat +def position_weights(densities, bias=None): + """Compute relative weights for angular clustering. + + Takes an array *densities* of densities in arbitrary units and + returns the relative weight of each shell. If *bias* is given, a + linear bias is applied to each shell. + + This is the equivalent of computing the product of normalised + redshift distribution and bias factor :math:`n(z) \\, b(z)` for the + discretised shells. + + Parameters + ---------- + densities : array_like + Density of points in each shell. The first axis must broadcast + against the number of shells, and is normalised internally. + bias : array_like, optional + Value or values of the linear bias parameter for each shell. + + Returns + ------- + weights : array_like + Relative weight of each shell for angular clustering. 
+ + """ + # bring densities and bias into the same shape + if bias is not None: + densities, bias = broadcast_first(densities, bias) + # normalise densities after shape has been fixed + densities = densities / np.sum(densities, axis=0) + # apply bias after normalisation + if bias is not None: + densities = densities * bias + # densities now contains the relative contribution with bias applied + return densities diff --git a/glass/shapes.py b/glass/shapes.py index 2fdb529e..2d2d9acd 100644 --- a/glass/shapes.py +++ b/glass/shapes.py @@ -24,8 +24,10 @@ ''' +from __future__ import annotations import numpy as np +from numpy.typing import ArrayLike, NDArray def triaxial_axis_ratio(zeta, xi, size=None, *, rng=None): @@ -162,22 +164,25 @@ def ellipticity_ryden04(mu, sigma, gamma, sigma_gamma, size=None, *, rng=None): return e -def ellipticity_gaussian(size, sigma, *, rng=None): +def ellipticity_gaussian(count: int | ArrayLike, sigma: ArrayLike, *, + rng: np.random.Generator | None = None + ) -> NDArray: r'''Sample Gaussian galaxy ellipticities. - The ellipticities are sampled from a normal distribution with standard - deviation ``sigma`` for each component. Samples where the ellipticity is - larger than unity are discarded. Hence, do not use this function with too - large values of ``sigma``, or the sampling will become inefficient. + The ellipticities are sampled from a normal distribution with + standard deviation ``sigma`` for each component. Samples where the + ellipticity is larger than unity are discarded. Hence, do not use + this function with too large values of ``sigma``, or the sampling + will become inefficient. Parameters ---------- - size : int + count : int or array_like Number of ellipticities to be sampled. sigma : array_like Standard deviation in each component. rng : :class:`~numpy.random.Generator`, optional - Random number generator. If not given, a default RNG will be used. + Random number generator. If not given, a default RNG is used. 
Returns ------- @@ -190,33 +195,45 @@ def ellipticity_gaussian(size, sigma, *, rng=None): if rng is None: rng = np.random.default_rng() - # sample complex ellipticities - # reject those where abs(e) > 0 - e = rng.standard_normal(2*size, np.float64).view(np.complex128) - e *= sigma - i = np.where(np.abs(e) > 1)[0] - while len(i) > 0: - rng.standard_normal(2*len(i), np.float64, e[i].view(np.float64)) - e[i] *= sigma - i = i[np.abs(e[i]) > 1] - - return e + # bring inputs into common shape + count, sigma = np.broadcast_arrays(count, sigma) + # allocate flattened output array + eps = np.empty(count.sum(), dtype=np.complex128) -def ellipticity_intnorm(size, sigma, *, rng=None): + # sample complex ellipticities + # reject those where abs(e) > 0 + i = 0 + for k in np.ndindex(count.shape): + e = rng.standard_normal(2*count[k], np.float64).view(np.complex128) + e *= sigma[k] + r = np.where(np.abs(e) > 1)[0] + while len(r) > 0: + rng.standard_normal(2*len(r), np.float64, e[r].view(np.float64)) + e[r] *= sigma[k] + r = r[np.abs(e[r]) > 1] + eps[i:i+count[k]] = e + i += count[k] + + return eps + + +def ellipticity_intnorm(count: int | ArrayLike, sigma: ArrayLike, *, + rng: np.random.Generator | None = None + ) -> NDArray: r'''Sample galaxy ellipticities with intrinsic normal distribution. - The ellipticities are sampled from an intrinsic normal distribution with - standard deviation ``sigma`` for each component. + The ellipticities are sampled from an intrinsic normal distribution + with standard deviation ``sigma`` for each component. Parameters ---------- - size : int + count : int | array_like Number of ellipticities to sample. sigma : array_like Standard deviation of the ellipticity in each component. rng : :class:`~numpy.random.Generator`, optional - Random number generator. If not given, a default RNG will be used. + Random number generator. If not given, a default RNG is used. 
Returns ------- @@ -225,21 +242,31 @@ def ellipticity_intnorm(size, sigma, *, rng=None): ''' - # make sure sigma is admissible - if not 0 <= sigma < 0.5**0.5: - raise ValueError('sigma must be between 0 and sqrt(0.5)') - # default RNG if not provided if rng is None: rng = np.random.default_rng() + # bring inputs into common shape + count, sigma = np.broadcast_arrays(count, sigma) + + # make sure sigma is admissible + if not np.all((0 <= sigma) & (sigma < 0.5**0.5)): + raise ValueError('sigma must be between 0 and sqrt(0.5)') + # convert to sigma_eta using fit sigma_eta = sigma*((8 + 5*sigma**2)/(2 - 4*sigma**2))**0.5 - # sample complex ellipticities - e = rng.standard_normal(2*size, np.float64).view(np.complex128) - e *= sigma_eta - r = np.hypot(e.real, e.imag) - e *= np.divide(np.tanh(r/2), r, where=(r > 0), out=r) + # allocate flattened output array + eps = np.empty(count.sum(), dtype=np.complex128) - return e + # sample complex ellipticities + i = 0 + for k in np.ndindex(count.shape): + e = rng.standard_normal(2*count[k], np.float64).view(np.complex128) + e *= sigma_eta[k] + r = np.hypot(e.real, e.imag) + e *= np.divide(np.tanh(r/2), r, where=(r > 0), out=r) + eps[i:i+count[k]] = e + i += count[k] + + return eps diff --git a/glass/shells.py b/glass/shells.py index d410194b..b1667d6d 100644 --- a/glass/shells.py +++ b/glass/shells.py @@ -10,11 +10,15 @@ matter shells, i.e. the radial discretisation of the light cone. +.. _reference-window-functions: + Window functions ---------------- .. autoclass:: RadialWindow .. autofunction:: tophat_windows +.. autofunction:: linear_windows +.. autofunction:: cubic_windows Window function tools @@ -22,6 +26,7 @@ .. autofunction:: restrict .. autofunction:: partition +.. 
autofunction:: combine Redshift grids @@ -44,8 +49,7 @@ from collections import namedtuple import numpy as np -from .math import ndinterp - +from .core.array import ndinterp # type checking from typing import (Union, Sequence, List, Tuple, Optional, Callable, @@ -152,6 +156,10 @@ def tophat_windows(zbins: ArrayLike1D, dz: float = 1e-3, ws : (N,) list of :class:`RadialWindow` List of window functions. + See Also + -------- + :ref:`user-window-functions` + ''' if len(zbins) < 2: raise ValueError('zbins must have at least two entries') @@ -174,6 +182,118 @@ def tophat_windows(zbins: ArrayLike1D, dz: float = 1e-3, return ws +def linear_windows(zgrid: ArrayLike1D, dz: float = 1e-3, + weight: Optional[WeightFunc] = None, + ) -> List[RadialWindow]: + '''Linear interpolation window functions. + + Uses the *N+2* given redshifts as nodes to construct *N* triangular + window functions between the first and last node. These correspond + to linear interpolation of radial functions. The redshift spacing + of the windows is approximately equal to ``dz``. + + An optional weight function :math:`w(z)` can be given using + ``weight``; it is applied to the triangular windows. + + The resulting windows functions are :class:`RadialWindow` instances. + Their effective redshifts correspond to the given nodes. + + Parameters + ---------- + zgrid : (N+2,) array_like + Redshift grid for the triangular window functions. + dz : float, optional + Approximate spacing of the redshift grid. + weight : callable, optional + If given, a weight function to be applied to the window + functions. + + Returns + ------- + ws : (N,) list of :class:`RadialWindow` + List of window functions. 
+ + See Also + -------- + :ref:`user-window-functions` + + ''' + if len(zgrid) < 3: + raise ValueError('nodes must have at least 3 entries') + if zgrid[0] != 0: + warnings.warn('first triangular window does not start at z=0') + + ws = [] + for zmin, zmid, zmax in zip(zgrid, zgrid[1:], zgrid[2:]): + n = max(round((zmid - zmin)/dz), 2) - 1 + m = max(round((zmax - zmid)/dz), 2) + z = np.concatenate([np.linspace(zmin, zmid, n, endpoint=False), + np.linspace(zmid, zmax, m)]) + w = np.concatenate([np.linspace(0., 1., n, endpoint=False), + np.linspace(1., 0., m)]) + if weight is not None: + w *= weight(z) + ws.append(RadialWindow(z, w, zmid)) + return ws + + +def cubic_windows(zgrid: ArrayLike1D, dz: float = 1e-3, + weight: Optional[WeightFunc] = None, + ) -> List[RadialWindow]: + '''Cubic interpolation window functions. + + Uses the *N+2* given redshifts as nodes to construct *N* cubic + Hermite spline window functions between the first and last node. + These correspond to cubic spline interpolation of radial functions. + The redshift spacing of the windows is approximately equal to + ``dz``. + + An optional weight function :math:`w(z)` can be given using + ``weight``; it is applied to the cubic spline windows. + + The resulting windows functions are :class:`RadialWindow` instances. + Their effective redshifts correspond to the given nodes. + + Parameters + ---------- + zgrid : (N+2,) array_like + Redshift grid for the cubic spline window functions. + dz : float, optional + Approximate spacing of the redshift grid. + weight : callable, optional + If given, a weight function to be applied to the window + functions. + + Returns + ------- + ws : (N,) list of :class:`RadialWindow` + List of window functions. 
+ + See Also + -------- + :ref:`user-window-functions` + + ''' + if len(zgrid) < 3: + raise ValueError('nodes must have at least 3 entries') + if zgrid[0] != 0: + warnings.warn('first cubic spline window does not start at z=0') + + ws = [] + for zmin, zmid, zmax in zip(zgrid, zgrid[1:], zgrid[2:]): + n = max(round((zmid - zmin)/dz), 2) - 1 + m = max(round((zmax - zmid)/dz), 2) + z = np.concatenate([np.linspace(zmin, zmid, n, endpoint=False), + np.linspace(zmid, zmax, m)]) + u = np.linspace(0., 1., n, endpoint=False) + v = np.linspace(1., 0., m) + w = np.concatenate([u**2*(3-2*u), v**2*(3-2*v)]) + if weight is not None: + w *= weight(z) + ws.append(RadialWindow(z, w, zmid)) + return ws + + def restrict(z: ArrayLike1D, f: ArrayLike1D, w: RadialWindow ) -> Tuple[np.ndarray, np.ndarray]: '''Restrict a function to a redshift window. @@ -212,45 +332,237 @@ def restrict(z: ArrayLike1D, f: ArrayLike1D, w: RadialWindow return zr, fr -def partition(z: ArrayLike1D, f: ArrayLike1D, ws: Sequence[RadialWindow] - ) -> Tuple[List[np.ndarray], List[np.ndarray]]: - '''Partition a function by a sequence of windows. +def partition(z: ArrayLike, + fz: ArrayLike, + shells: Sequence[RadialWindow], + *, + method: str = "nnls", + ) -> ArrayLike: + """Partition a function by a sequence of windows. - Partitions the given function into a sequence of functions - restricted to each window function. + Returns a vector of weights :math:`x_1, x_2, \\ldots` such that the + weighted sum of normalised radial window functions :math:`x_1 \\, + w_1(z) + x_2 \\, w_2(z) + \\ldots` approximates the given function + :math:`f(z)`. - The function :math:`f(z)` is given by redshifts ``z`` of shape - *(N,)* and function values ``f`` of shape *(..., N)*, with any - number of leading axes allowed. + The function :math:`f(z)` is given by redshifts *z* of shape *(N,)* + and function values *fz* of shape *(..., N)*, with any number of + leading axes allowed. 
- The window functions are given by the sequence ``ws`` of + The window functions are given by the sequence *shells* of :class:`RadialWindow` or compatible entries. - The partitioned functions have redshifts that are the union of the - redshifts of the original function and each window over the support - of said window. Intermediate function values are found by linear - interpolation - Parameters ---------- - z, f : array_like - The function to be partitioned. - ws : sequence of :class:`RadialWindow` + z, fz : array_like + The function to be partitioned. If *f* is multi-dimensional, + its last axis must agree with *z*. + shells : sequence of :class:`RadialWindow` Ordered sequence of window functions for the partition. + method : {"lstsq", "nnls", "restrict"} + Method for the partition. See notes for description. Returns ------- - zp, fp : list of array - The partitioned functions, ordered as the given windows. + x : array_like + Weights of the partition, where the leading axis corresponds to + *shells*. + + Notes + ----- + Formally, if :math:`w_i` are the normalised window functions, + :math:`f` is the target function, and :math:`z_i` is a redshift grid + with intervals :math:`\\Delta z_i`, the partition problem seeks an + approximate solution of + + .. math:: + \\begin{pmatrix} + w_1(z_1) \\Delta z_1 & w_2(z_1) \\, \\Delta z_1 & \\cdots \\\\ + w_1(z_2) \\Delta z_2 & w_2(z_2) \\, \\Delta z_2 & \\cdots \\\\ + \\vdots & \\vdots & \\ddots + \\end{pmatrix} \\, \\begin{pmatrix} + x_1 \\\\ x_2 \\\\ \\vdots + \\end{pmatrix} = \\begin{pmatrix} + f(z_1) \\, \\Delta z_1 \\\\ f(z_2) \\, \\Delta z_2 \\\\ \\vdots + \\end{pmatrix} \\;. + + The redshift grid is the union of the given array *z* and the + redshift arrays of all window functions. Intermediate function + values are found by linear interpolation. + + When partitioning a density function, it is usually desirable to + keep the normalisation fixed. 
In that case, the problem can be + enhanced with the further constraint that the sum of the solution + equals the integral of the target function, + + .. math:: + \\begin{pmatrix} + w_1(z_1) \\Delta z_1 & w_2(z_1) \\, \\Delta z_1 & \\cdots \\\\ + w_1(z_2) \\Delta z_2 & w_2(z_2) \\, \\Delta z_2 & \\cdots \\\\ + \\vdots & \\vdots & \\ddots \\\\ + \\hline + \\lambda & \\lambda & \\cdots + \\end{pmatrix} \\, \\begin{pmatrix} + x_1 \\\\ x_2 \\\\ \\vdots + \\end{pmatrix} = \\begin{pmatrix} + f(z_1) \\, \\Delta z_1 \\\\ f(z_2) \\, \\Delta z_2 \\\\ \\vdots + \\\\ \\hline \\lambda \\int \\! f(z) \\, dz + \\end{pmatrix} \\;, + + where :math:`\\lambda` is a multiplier to enforce the integral + contraints. + + The :func:`partition()` function implements a number of methods to + obtain a solution: + + If ``method="nnls"`` (the default), obtain a partition from a + non-negative least-squares solution. This will usually match the + shape of the input function closely. The contribution from each + shell is a positive number, which is required to partition e.g. + density functions. + + If ``method="lstsq"``, obtain a partition from an unconstrained + least-squares solution. This will more closely match the shape of + the input function, but might lead to shells with negative + contributions. + + If ``method="restrict"``, obtain a partition by integrating the + restriction (using :func:`restrict`) of the function :math:`f` to + each window. For overlapping shells, this method might produce + results which are far from the input function. 
+ + """ + try: + partition_method = globals()[f"partition_{method}"] + except KeyError: + raise ValueError(f"invalid method: {method}") from None + return partition_method(z, fz, shells) + + +def partition_lstsq( + z: ArrayLike, + fz: ArrayLike, + shells: Sequence[RadialWindow], + *, + sumtol: float = 0.01, +) -> ArrayLike: + """Least-squares partition.""" + + # make sure nothing breaks + if sumtol < 1e-4: + sumtol = 1e-4 + + # compute the union of all given redshift grids + zp = z + for w in shells: + zp = np.union1d(zp, w.za) + + # get extra leading axes of fz + *dims, _ = np.shape(fz) + + # compute grid spacing + dz = np.gradient(zp) + + # create the window function matrix + a = [np.interp(zp, za, wa, left=0., right=0.) for za, wa, _ in shells] + a = a/np.trapz(a, zp, axis=-1)[..., None] + a = a*dz + + # create the target vector of distribution values + b = ndinterp(zp, z, fz, left=0., right=0.) + b = b*dz + + # append a constraint for the integral + mult = 1/sumtol + a = np.concatenate([a, mult * np.ones((len(shells), 1))], axis=-1) + b = np.concatenate([b, mult * np.reshape(np.trapz(fz, z), (*dims, 1))], axis=-1) + + # now a is a matrix of shape (len(shells), len(zp) + 1) + # and b is a matrix of shape (*dims, len(zp) + 1) + # need to find weights x such that b == x @ a over all axes of b + # do the least-squares fit over partially flattened b, then reshape + x = np.linalg.lstsq(a.T, b.reshape(-1, zp.size + 1).T, rcond=None)[0] + x = x.T.reshape(*dims, len(shells)) + # roll the last axis of size len(shells) to the front + x = np.moveaxis(x, -1, 0) + # all done + return x + + +def partition_nnls( + z: ArrayLike, + fz: ArrayLike, + shells: Sequence[RadialWindow], + *, + sumtol: float = 0.01, +) -> ArrayLike: + """Non-negative least-squares partition. + + Uses the ``nnls()`` algorithm from ``scipy.optimize`` and thus + requires SciPy. 
+ + """ + + from .core.algorithm import nnls + + # make sure nothing breaks + if sumtol < 1e-4: + sumtol = 1e-4 + + # compute the union of all given redshift grids + zp = z + for w in shells: + zp = np.union1d(zp, w.za) + + # get extra leading axes of fz + *dims, _ = np.shape(fz) + + # compute grid spacing + dz = np.gradient(zp) + + # create the window function matrix + a = [np.interp(zp, za, wa, left=0., right=0.) for za, wa, _ in shells] + a = a/np.trapz(a, zp, axis=-1)[..., None] + a = a*dz + + # create the target vector of distribution values + b = ndinterp(zp, z, fz, left=0., right=0.) + b = b*dz + + # append a constraint for the integral + mult = 1/sumtol + a = np.concatenate([a, mult * np.ones((len(shells), 1))], axis=-1) + b = np.concatenate([b, mult * np.reshape(np.trapz(fz, z), (*dims, 1))], axis=-1) + + # now a is a matrix of shape (len(shells), len(zp) + 1) + # and b is a matrix of shape (*dims, len(zp) + 1) + # for each dim, find non-negative weights x such that b == a.T @ x + + # reduce the dimensionality of the problem using a thin QR decomposition + q, r = np.linalg.qr(a.T) + y = np.einsum('ji,...j', q, b) + + # for each dim, find non-negative weights x such that y == r @ x + x = np.empty([len(shells)] + dims) + for i in np.ndindex(*dims): + x[(...,) + i] = nnls(r, y[i]) + + # all done + return x - ''' - zp, fp = [], [] - for w in ws: - zr, fr = restrict(z, f, w) - zp.append(zr) - fp.append(fr) - return zp, fp +def partition_restrict( + z: ArrayLike, + fz: ArrayLike, + shells: Sequence[RadialWindow], +) -> ArrayLike: + """Partition by restriction and integration.""" + + part = np.empty((len(shells),) + np.shape(fz)[:-1]) + for i, w in enumerate(shells): + zr, fr = restrict(z, fz, w) + part[i] = np.trapz(fr, zr, axis=-1) + return part def redshift_grid(zmin, zmax, *, dz=None, num=None): @@ -274,3 +586,50 @@ def distance_grid(cosmo, zmin, zmax, *, dx=None, num=None): else: raise ValueError('exactly one of "dx" or "num" must be given') return 
cosmo.dc_inv(x) + + +def combine( + z: ArrayLike, + weights: ArrayLike, + shells: Sequence[RadialWindow], +) -> ArrayLike: + """Evaluate a linear combination of window functions. + + Takes a vector of weights :math:`x_1, x_2, \\ldots` and computes the + weighted sum of normalised radial window functions :math:`f(z) = x_1 + \\, w_1(z) + x_2 \\, w_2(z) + \\ldots` in the given redshifts + :math:`z`. + + The window functions are given by the sequence *shells* of + :class:`RadialWindow` or compatible entries. + + Parameters + ---------- + z : array_like + Redshifts *z* in which to evaluate the combined function. + weights : array_like + Weights of the linear combination, where the leading axis + corresponds to *shells*. + shells : sequence of :class:`RadialWindow` + Ordered sequence of window functions to be combined. + + Returns + ------- + fz : array_like + Linear combination of window functions, evaluated in *z*. + + See Also + -------- + partition : Find weights for a given function. + + """ + return sum( + np.expand_dims(weight, -1) * np.interp( + z, + shell.za, + shell.wa / np.trapz(shell.wa, shell.za), + left=0., + right=0., + ) + for shell, weight in zip(shells, weights) + ) diff --git a/glass/test/test_dummy.py b/glass/test/test_dummy.py deleted file mode 100644 index 10cf3ad0..00000000 --- a/glass/test/test_dummy.py +++ /dev/null @@ -1,2 +0,0 @@ -def test_dummy(): - pass diff --git a/glass/test/test_fields.py b/glass/test/test_fields.py new file mode 100644 index 00000000..d273a02a --- /dev/null +++ b/glass/test/test_fields.py @@ -0,0 +1,8 @@ +def test_getcl(): + from glass.fields import getcl + # make a mock Cls array with the index pairs as entries + cls = [{i, j} for i in range(10) for j in range(i, -1, -1)] + # make sure indices are retrieved correctly + for i in range(10): + for j in range(10): + assert getcl(cls, i, j) == {i, j} diff --git a/glass/test/test_galaxies.py b/glass/test/test_galaxies.py new file mode 100644 index 00000000..c852b7d0 --- 
/dev/null +++ b/glass/test/test_galaxies.py @@ -0,0 +1,171 @@ +import pytest + + +def test_redshifts(): + from unittest.mock import Mock + import numpy as np + from glass.galaxies import redshifts + + # create a mock radial window function + w = Mock() + w.za = np.linspace(0., 1., 20) + w.wa = np.exp(-0.5*(w.za - 0.5)**2/0.1**2) + + # sample redshifts (scalar) + z = redshifts(13, w) + assert z.shape == (13,) + assert z.min() >= 0. and z.max() <= 1. + + # sample redshifts (array) + z = redshifts([[1, 2], [3, 4]], w) + assert z.shape == (10,) + + +def test_redshifts_from_nz(): + import numpy as np + from glass.galaxies import redshifts_from_nz + + # test sampling + + redshifts = redshifts_from_nz(10, [0, 1, 2, 3, 4], [1, 0, 0, 0, 0]) + assert np.all((0 <= redshifts) & (redshifts <= 1)) + + redshifts = redshifts_from_nz(10, [0, 1, 2, 3, 4], [0, 0, 1, 0, 0]) + assert np.all((1 <= redshifts) & (redshifts <= 3)) + + redshifts = redshifts_from_nz(10, [0, 1, 2, 3, 4], [0, 0, 0, 0, 1]) + assert np.all((3 <= redshifts) & (redshifts <= 4)) + + redshifts = redshifts_from_nz(10, [0, 1, 2, 3, 4], [0, 0, 1, 1, 1]) + assert not np.any(redshifts <= 1) + + # test interface + + # case: no extra dimensions + + count = 10 + z = np.linspace(0, 1, 100) + nz = z*(1-z) + + redshifts = redshifts_from_nz(count, z, nz) + + assert redshifts.shape == (count,) + assert np.all((0 <= redshifts) & (redshifts <= 1)) + + # case: extra dimensions from count + + count = [10, 20, 30] + z = np.linspace(0, 1, 100) + nz = z*(1-z) + + redshifts = redshifts_from_nz(count, z, nz) + + assert np.shape(redshifts) == (60,) + + # case: extra dimensions from nz + + count = 10 + z = np.linspace(0, 1, 100) + nz = [z*(1-z), (z-0.5)**2] + + redshifts = redshifts_from_nz(count, z, nz) + + assert redshifts.shape == (20,) + + # case: extra dimensions from count and nz + + count = [[10], [20], [30]] + z = np.linspace(0, 1, 100) + nz = [z*(1-z), (z-0.5)**2] + + redshifts = redshifts_from_nz(count, z, nz) + + assert 
redshifts.shape == (120,) + + # case: incompatible input shapes + + count = [10, 20, 30] + z = np.linspace(0, 1, 100) + nz = [z*(1-z), (z-0.5)**2] + + with pytest.raises(ValueError): + redshifts_from_nz(count, z, nz) + + +def test_gaussian_phz(): + import numpy as np + from glass.galaxies import gaussian_phz + + # test sampling + + # case: zero variance + + z = np.linspace(0, 1, 100) + sigma_0 = 0. + + phz = gaussian_phz(z, sigma_0) + + np.testing.assert_array_equal(z, phz) + + # case: truncated normal + + z = 0. + sigma_0 = np.ones(100) + + phz = gaussian_phz(z, sigma_0) + + assert phz.shape == (100,) + assert np.all(phz >= 0) + + # case: upper and lower bound + + z = 1. + sigma_0 = np.ones(100) + + phz = gaussian_phz(z, sigma_0, lower=0.5, upper=1.5) + + assert phz.shape == (100,) + assert np.all(phz >= 0.5) + assert np.all(phz <= 1.5) + + # test interface + + # case: scalar redshift, scalar sigma_0 + + z = 1. + sigma_0 = 0. + + phz = gaussian_phz(z, sigma_0) + + assert np.ndim(phz) == 0 + assert phz == z + + # case: array redshift, scalar sigma_0 + + z = np.linspace(0, 1, 10) + sigma_0 = 0. + + phz = gaussian_phz(z, sigma_0) + + assert phz.shape == (10,) + np.testing.assert_array_equal(z, phz) + + # case: scalar redshift, array sigma_0 + + z = 1. 
+ sigma_0 = np.zeros(10) + + phz = gaussian_phz(z, sigma_0) + + assert phz.shape == (10,) + np.testing.assert_array_equal(z, phz) + + # case: array redshift, array sigma_0 + + z = np.linspace(0, 1, 10) + sigma_0 = np.zeros((11, 1)) + + phz = gaussian_phz(z, sigma_0) + + assert phz.shape == (11, 10) + np.testing.assert_array_equal(np.broadcast_to(z, (11, 10)), phz) diff --git a/glass/test/test_lensing.py b/glass/test/test_lensing.py new file mode 100644 index 00000000..da0780de --- /dev/null +++ b/glass/test/test_lensing.py @@ -0,0 +1,133 @@ +import numpy as np +import numpy.testing as npt +import pytest + + +@pytest.fixture +def shells(): + from glass.shells import RadialWindow + + shells = [ + RadialWindow([0., 1., 2.], [0., 1., 0.], 1.), + RadialWindow([1., 2., 3.], [0., 1., 0.], 2.), + RadialWindow([2., 3., 4.], [0., 1., 0.], 3.), + RadialWindow([3., 4., 5.], [0., 1., 0.], 4.), + RadialWindow([4., 5., 6.], [0., 1., 0.], 5.), + ] + + return shells + + +@pytest.fixture +def cosmo(): + class MockCosmology: + + @property + def omega_m(self): + return 0.3 + + def ef(self, z): + return (self.omega_m * (1 + z)**3 + 1 - self.omega_m) ** 0.5 + + def xm(self, z, z2=None): + if z2 is None: + return np.array(z) * 1000 + else: + return (np.array(z2) - np.array(z)) * 1000 + + return MockCosmology() + + +@pytest.mark.parametrize("usecomplex", [True, False]) +def test_deflect_nsew(usecomplex): + from glass.lensing import deflect + + d = 5. 
+ r = np.radians(d) + + if usecomplex: + def alpha(re, im): + return re + 1j*im + else: + def alpha(re, im): + return [re, im] + + # north + lon, lat = deflect(0., 0., alpha(r, 0)) + assert np.allclose([lon, lat], [0., d]) + + # south + lon, lat = deflect(0., 0., alpha(-r, 0)) + assert np.allclose([lon, lat], [0., -d]) + + # east + lon, lat = deflect(0., 0., alpha(0, r)) + assert np.allclose([lon, lat], [-d, 0.]) + + # west + lon, lat = deflect(0., 0., alpha(0, -r)) + assert np.allclose([lon, lat], [d, 0.]) + + +def test_deflect_many(): + import healpix + from glass.lensing import deflect + + n = 1000 + abs_alpha = np.random.uniform(0, 2*np.pi, size=n) + arg_alpha = np.random.uniform(-np.pi, np.pi, size=n) + + lon_ = np.degrees(np.random.uniform(-np.pi, np.pi, size=n)) + lat_ = np.degrees(np.arcsin(np.random.uniform(-1, 1, size=n))) + + lon, lat = deflect(lon_, lat_, abs_alpha*np.exp(1j*arg_alpha)) + + x_, y_, z_ = healpix.ang2vec(lon_, lat_, lonlat=True) + x, y, z = healpix.ang2vec(lon, lat, lonlat=True) + + dotp = x*x_ + y*y_ + z*z_ + + npt.assert_allclose(dotp, np.cos(abs_alpha)) + + +def test_multi_plane_matrix(shells, cosmo): + from glass.lensing import MultiPlaneConvergence, multi_plane_matrix + + mat = multi_plane_matrix(shells, cosmo) + + npt.assert_array_equal(mat, np.tril(mat)) + npt.assert_array_equal(np.triu(mat, 1), 0) + + convergence = MultiPlaneConvergence(cosmo) + + deltas = np.random.rand(len(shells), 10) + kappas = [] + for shell, delta in zip(shells, deltas): + convergence.add_window(delta, shell) + kappas.append(convergence.kappa.copy()) + + npt.assert_allclose(mat @ deltas, kappas) + + +def test_multi_plane_weights(shells, cosmo): + from glass.lensing import MultiPlaneConvergence, multi_plane_weights + + w_in = np.eye(len(shells)) + w_out = multi_plane_weights(w_in, shells, cosmo) + + npt.assert_array_equal(w_out, np.triu(w_out, 1)) + npt.assert_array_equal(np.tril(w_out), 0) + + convergence = MultiPlaneConvergence(cosmo) + + deltas = 
np.random.rand(len(shells), 10) + weights = np.random.rand(len(shells), 3) + kappa = 0 + for shell, delta, weight in zip(shells, deltas, weights): + convergence.add_window(delta, shell) + kappa = kappa + weight[..., None] * convergence.kappa + kappa /= weights.sum(axis=0)[..., None] + + wmat = multi_plane_weights(weights, shells, cosmo) + + npt.assert_allclose(np.einsum('ij,ik', wmat, deltas), kappa) diff --git a/glass/test/test_points.py b/glass/test/test_points.py new file mode 100644 index 00000000..fd4f9bf6 --- /dev/null +++ b/glass/test/test_points.py @@ -0,0 +1,119 @@ +import numpy as np +import numpy.testing as npt + + +def catpos(pos): + lon, lat, cnt = [], [], 0 + for lo, la, co in pos: + lon = np.concatenate([lon, lo]) + lat = np.concatenate([lat, la]) + cnt = cnt + co + return lon, lat, cnt + + +def test_positions_from_delta(): + from glass.points import positions_from_delta + + # case: single-dimensional input + + ngal = 1e-3 + delta = np.zeros(12) + bias = 0.8 + vis = np.ones(12) + + lon, lat, cnt = catpos(positions_from_delta(ngal, delta, bias, vis)) + + assert isinstance(cnt, int) + assert lon.shape == lat.shape == (cnt,) + + # case: multi-dimensional ngal + + ngal = [1e-3, 2e-3] + delta = np.zeros(12) + bias = 0.8 + vis = np.ones(12) + + lon, lat, cnt = catpos(positions_from_delta(ngal, delta, bias, vis)) + + assert cnt.shape == (2,) + assert lon.shape == (cnt.sum(),) + assert lat.shape == (cnt.sum(),) + + # case: multi-dimensional delta + + ngal = 1e-3 + delta = np.zeros((3, 2, 12)) + bias = 0.8 + vis = np.ones(12) + + lon, lat, cnt = catpos(positions_from_delta(ngal, delta, bias, vis)) + + assert cnt.shape == (3, 2) + assert lon.shape == (cnt.sum(),) + assert lat.shape == (cnt.sum(),) + + # case: multi-dimensional broadcasting + + ngal = [1e-3, 2e-3] + delta = np.zeros((3, 1, 12)) + bias = 0.8 + vis = np.ones(12) + + lon, lat, cnt = catpos(positions_from_delta(ngal, delta, bias, vis)) + + assert cnt.shape == (3, 2) + assert lon.shape == 
(cnt.sum(),) + assert lat.shape == (cnt.sum(),) + + +def test_uniform_positions(): + from glass.points import uniform_positions + + # case: scalar input + + ngal = 1e-3 + + lon, lat, cnt = catpos(uniform_positions(ngal)) + + assert isinstance(cnt, int) + assert lon.shape == lat.shape == (cnt,) + + # case: 1-D array input + + ngal = [1e-3, 2e-3, 3e-3] + + lon, lat, cnt = catpos(uniform_positions(ngal)) + + assert cnt.shape == (3,) + assert lon.shape == lat.shape == (cnt.sum(),) + + # case: 2-D array input + + ngal = [[1e-3, 2e-3], [3e-3, 4e-3], [5e-3, 6e-3]] + + lon, lat, cnt = catpos(uniform_positions(ngal)) + + assert cnt.shape == (3, 2) + assert lon.shape == lat.shape == (cnt.sum(),) + + +def test_position_weights(): + from glass.points import position_weights + + for bshape in None, (), (100,), (100, 1): + for cshape in (100,), (100, 50), (100, 3, 2): + + counts = np.random.rand(*cshape) + bias = None if bshape is None else np.random.rand(*bshape) + + weights = position_weights(counts, bias) + + expected = counts / counts.sum(axis=0, keepdims=True) + if bias is not None: + if np.ndim(bias) > np.ndim(expected): + expected = np.expand_dims(expected, tuple(range(np.ndim(expected), np.ndim(bias)))) + else: + bias = np.expand_dims(bias, tuple(range(np.ndim(bias), np.ndim(expected)))) + expected = bias * expected + + npt.assert_allclose(weights, expected) diff --git a/glass/test/test_shapes.py b/glass/test/test_shapes.py index 6a49a1a4..3a4100c0 100644 --- a/glass/test/test_shapes.py +++ b/glass/test/test_shapes.py @@ -1,4 +1,32 @@ import numpy as np +import pytest + + +def test_ellipticity_gaussian(): + + from glass.shapes import ellipticity_gaussian + + n = 1_000_000 + + eps = ellipticity_gaussian(n, 0.256) + + assert eps.shape == (n,) + + assert np.all(np.abs(eps) < 1) + + assert np.isclose(np.std(eps.real), 0.256, atol=1e-3, rtol=0) + assert np.isclose(np.std(eps.imag), 0.256, atol=1e-3, rtol=0) + + eps = ellipticity_gaussian([n, n], [0.128, 0.256]) + + assert 
eps.shape == (2*n,) + + assert np.all(np.abs(eps) < 1) + + assert np.isclose(np.std(eps.real[:n]), 0.128, atol=1e-3, rtol=0) + assert np.isclose(np.std(eps.imag[:n]), 0.128, atol=1e-3, rtol=0) + assert np.isclose(np.std(eps.real[n:]), 0.256, atol=1e-3, rtol=0) + assert np.isclose(np.std(eps.imag[n:]), 0.256, atol=1e-3, rtol=0) def test_ellipticity_intnorm(): @@ -9,9 +37,23 @@ def test_ellipticity_intnorm(): eps = ellipticity_intnorm(n, 0.256) - assert eps.size == n + assert eps.shape == (n,) assert np.all(np.abs(eps) < 1) assert np.isclose(np.std(eps.real), 0.256, atol=1e-3, rtol=0) assert np.isclose(np.std(eps.imag), 0.256, atol=1e-3, rtol=0) + + eps = ellipticity_intnorm([n, n], [0.128, 0.256]) + + assert eps.shape == (2*n,) + + assert np.all(np.abs(eps) < 1) + + assert np.isclose(np.std(eps.real[:n]), 0.128, atol=1e-3, rtol=0) + assert np.isclose(np.std(eps.imag[:n]), 0.128, atol=1e-3, rtol=0) + assert np.isclose(np.std(eps.real[n:]), 0.256, atol=1e-3, rtol=0) + assert np.isclose(np.std(eps.imag[n:]), 0.256, atol=1e-3, rtol=0) + + with pytest.raises(ValueError): + ellipticity_intnorm(1, 0.71) diff --git a/glass/test/test_shells.py b/glass/test/test_shells.py index 330679cf..10f76318 100644 --- a/glass/test/test_shells.py +++ b/glass/test/test_shells.py @@ -1,5 +1,5 @@ import numpy as np -import numpy.testing as npt +import pytest def test_tophat_windows(): @@ -49,38 +49,28 @@ def test_restrict(): assert fr[i] == fi*np.interp(zi, w.za, w.wa) -def test_partition(): - from glass.shells import partition, RadialWindow +@pytest.mark.parametrize("method", ["lstsq", "nnls", "restrict"]) +def test_partition(method): + import numpy as np + from glass.shells import RadialWindow, partition - # Gaussian test function - z = np.linspace(0., 5., 1000) - f = np.exp(-((z - 2.)/0.5)**2/2) - - # overlapping triangular weight functions - ws = [RadialWindow(za=[0., 1., 2.], wa=[0., 1., 0.], zeff=None), - RadialWindow(za=[1., 2., 3.], wa=[0., 1., 0.], zeff=None), - 
RadialWindow(za=[2., 3., 4.], wa=[0., 1., 0.], zeff=None), - RadialWindow(za=[3., 4., 5.], wa=[0., 1., 0.], zeff=None)] + shells = [ + RadialWindow(np.array([0., 1.]), np.array([1., 0.]), 0.0), + RadialWindow(np.array([0., 1., 2.]), np.array([0., 1., 0.]), 0.5), + RadialWindow(np.array([1., 2., 3.]), np.array([0., 1., 0.]), 1.5), + RadialWindow(np.array([2., 3., 4.]), np.array([0., 1., 0.]), 2.5), + RadialWindow(np.array([3., 4., 5.]), np.array([0., 1., 0.]), 3.5), + RadialWindow(np.array([4., 5.]), np.array([0., 1.]), 5.0), + ] - zp, fp = partition(z, f, ws) - - assert len(zp) == len(fp) == len(ws) - - for zr, w in zip(zp, ws): - assert np.all((zr >= w.za[0]) & (zr <= w.za[-1])) - - for zr, fr, w in zip(zp, fp, ws): - f_ = np.interp(zr, z, f, left=0., right=0.) - w_ = np.interp(zr, w.za, w.wa, left=0., right=0.) - npt.assert_allclose(fr, f_*w_) + z = np.linspace(0., 5., 1000) + k = 1 + np.arange(6).reshape(3, 2, 1) + fz = np.exp(-z / k) - f_ = sum(np.interp(z, zr, fr, left=0., right=0.) - for zr, fr in zip(zp, fp)) + assert fz.shape == (3, 2, 1000) - # first and last points have zero total weight - assert f_[0] == f_[-1] == 0. + part = partition(z, fz, shells, method=method) - # find first and last index where total weight becomes unity - i, j = np.searchsorted(z, [ws[0].za[1], ws[-1].za[1]]) + assert part.shape == (len(shells), 3, 2) - npt.assert_allclose(f_[i:j], f[i:j], atol=1e-15) + assert np.allclose(part.sum(axis=0), np.trapz(fz, z)) diff --git a/glass/user.py b/glass/user.py index 28c3969f..af2f13bd 100644 --- a/glass/user.py +++ b/glass/user.py @@ -16,24 +16,10 @@ .. autofunction:: save_cls .. autofunction:: load_cls - -Profiling ---------- - -.. autoclass:: Profiler -.. autofunction:: profile - ''' -import logging -import time -import tracemalloc -from datetime import timedelta -from contextlib import contextmanager import numpy as np -logger = logging.getLogger(__name__) - def save_cls(filename, cls): '''Save a list of Cls to file. 
@@ -59,65 +45,3 @@ def load_cls(filename): values = npz['values'] split = npz['split'] return np.split(values, split) - - -def _memf(n: int): - '''Format a number of bytes in human-readable form.''' - n = float(n) - for x in ['', 'k', 'M', 'G', 'T']: - if n < 1000: - break - n /= 1000 - return f'{n:.2f}{x}' - - -class Profiler: - '''Simple procedural profiling.''' - - DEFAULT_LOGGER: logging.Logger = logger - '''The default logger to use for instances of this class.''' - - def __init__(self, logger: logging.Logger = None): - '''Create a new instance of a profiler.''' - self._logger = logger if logger is not None else self.DEFAULT_LOGGER - self._time = {} - - def start(self, message: str = None): - '''Start profiler.''' - self.log(message, 'start') - - def stop(self, message: str = None): - '''Stop profiler.''' - self.log(message, 'start') - self._time = {} - - def log(self, message: str = None, timer: str = None): - '''Log a profiling point.''' - t0 = self._time.get(timer) - t1 = self._time[timer] = time.monotonic() - parts = [] - if message is not None: - parts.append(message) - if t0 is not None: - parts.append(f'time {timedelta(seconds=t1-t0)}') - if tracemalloc.is_tracing(): - current, peak = tracemalloc.get_traced_memory() - parts.append(f'mem {_memf(current)}') - parts.append(f'peak {_memf(peak)}') - msg = ' -- '.join(parts) - self._logger.info('### %s', msg) - - def loop(self, message: str = None): - '''Log a profiling point for a loop.''' - self.log(message, 'loop') - - -@contextmanager -def profile(message: str = None, logger: logging.Logger = None): - '''Context manager for simple profiling.''' - prof = Profiler(logger) - try: - prof.start(message) - yield prof - finally: - prof.stop(message) diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..af1e1db7 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,51 @@ +[build-system] +requires = ["hatchling", "hatch-vcs"] +build-backend = "hatchling.build" + +[project] +name = "glass" 
+description = "Generator for Large Scale Structure" +readme = "README.md" +requires-python = ">=3.6" +license = {file = "LICENSE"} +maintainers = [ + {name = "Nicolas Tessore", email = "n.tessore@ucl.ac.uk"}, +] +classifiers = [ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: MIT License", + "Operating System :: OS Independent", +] +dependencies = [ + "numpy>=1.20.0", + "healpix>=2022.11.1", + "healpy>=1.15.0", + "cosmology>=2022.10.9", + "gaussiancl>=2022.10.21", +] +dynamic = ["version"] + +[project.optional-dependencies] +test = [ + "pytest", + "scipy", +] +docs = [ + "sphinx", + "furo", + "sphinxcontrib-katex", + "numpydoc", + "matplotlib", +] + +[project.urls] +Homepage = "https://github.com/glass-dev/glass" +Documentation = "https://glass.readthedocs.io/" +Issues = "https://github.com/glass-dev/glass/issues" +Changelog = "https://glass.readthedocs.io/en/stable/manual/releases.html" + +[tool.hatch] +version.source = "vcs" + +[tool.hatch.build.hooks.vcs] +version-file = "glass/_version.py" diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 3a1b6640..00000000 --- a/setup.cfg +++ /dev/null @@ -1,44 +0,0 @@ -[metadata] -name = glass -version = 2023.2-dev -maintainer = Nicolas Tessore -maintainer_email = n.tessore@ucl.ac.uk -description = Generator for Large Scale Structure -long_description = file: README.md -long_description_content_type = text/markdown -license = MIT -license_file = LICENSE -url = https://github.com/glass-dev/glass -project_urls = - Documentation = https://glass.readthedocs.io/ - Issues = https://github.com/glass-dev/glass/issues -classifiers = - Programming Language :: Python :: 3 - License :: OSI Approved :: MIT License - Operating System :: OS Independent - -[options] -python_requires = >=3.6 -install_requires = - numpy - healpix>=2022.11.1 - healpy>=1.15.0 - cosmology>=2022.10.9 - gaussiancl>=2022.10.21 -packages = find_namespace: - -[options.packages.find] -include = glass* - -[options.extras_require] 
-test = - pytest -docs = - sphinx - furo - sphinxcontrib-katex - numpydoc - matplotlib - -[flake8] -ignore = E226,E501,E741 diff --git a/setup.py b/setup.py deleted file mode 100644 index 60684932..00000000 --- a/setup.py +++ /dev/null @@ -1,3 +0,0 @@ -from setuptools import setup - -setup() diff --git a/tox.ini b/tox.ini deleted file mode 100644 index 4d5a2d8f..00000000 --- a/tox.ini +++ /dev/null @@ -1,24 +0,0 @@ -[tox] -envlist = - test - flake8 - docs - -[testenv] -changedir = . -extras = test -commands = - pip freeze - pytest glass {posargs} - -[testenv:flake8] -changedir = . -skip_install = true -deps = flake8 -commands = flake8 glass - -[testenv:docs] -changedir = . -extras = docs -commands = - sphinx-build -W -b html -d "{envtmpdir}/doctrees" docs "{envtmpdir}/html" --color {posargs}