diff --git a/.codemeta/codemeta_base.json b/.codemeta/codemeta_base.json new file mode 100644 index 000000000..99132a33b --- /dev/null +++ b/.codemeta/codemeta_base.json @@ -0,0 +1,72 @@ +{ + "@context": "https://w3id.org/codemeta/3.0", + "@type": "SoftwareSourceCode", + "targetProduct": { + "@type": "SoftwareLibrary", + "name": "brian2", + "runtimePlatform": [ + "Python", + "Python 3" + ] + }, + "maintainer": [ + { + "@id": "https://orcid.org/0000-0002-2648-4790" + } + ], + "author": [ + { + "@id": "https://orcid.org/0000-0002-2648-4790", + "@type": "Person", + "familyName": "Stimberg", + "givenName": "Marcel", + "identifier": "https://github.com/mstimberg" + }, + { + "@id": "https://orcid.org/0000-0003-1007-6474", + "@type": "Person", + "familyName": "Goodman", + "givenName": "Dan F. M.", + "identifier": "https://github.com/thesamovar" + }, + { + "@id": "https://orcid.org/0000-0002-1734-6070", + "@type": "Person", + "familyName": "Evans", + "givenName": "Benjamin D.", + "identifier": "https://github.com/bdevans" + }, + { + "@id": "https://orcid.org/0000-0003-0110-1623", + "@type": "Person", + "familyName": "Brette", + "givenName": "Romain", + "identifier": "https://github.com/romainbrette" + } + ], + "codeRepository": "https://github.com/brian-team/brian2", + "continuousIntegration": "https://github.com/brian-team/brian2/actions", + "issueTracker": "https://github.com/brian-team/brian2/issues", + "keywords": [ + "biological neural networks", + "computational neuroscience", + "neural networks", + "research", + "simulation", + "spiking neurons" + ], + "operatingSystem": "OS Independent", + "softwareHelp": "https://brian2.readthedocs.io/", + "developmentStatus": "active", + "description": "A clock-driven simulator for spiking neural networks", + "license": "https://spdx.org/licenses/CECILL-2.1", + "name": "Brian simulator", + "url": "https://briansimulator.org", + "programmingLanguage": [ + "Python" + ], + "runtimePlatform": [ + "Python", + "Python 3" + ] +} \ No 
newline at end of file diff --git a/.codemeta/create_codemeta.py b/.codemeta/create_codemeta.py new file mode 100644 index 000000000..395138c6d --- /dev/null +++ b/.codemeta/create_codemeta.py @@ -0,0 +1,52 @@ +import re +import os +import pkg_resources +import tomllib +import json +import sys + + +if __name__ == "__main__": + if not len(sys.argv) == 2: + raise ValueError("Usage: python create_codemeta.py ") + basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..") + + with open(os.path.join(basedir, "pyproject.toml"), "rb") as f: + pyproject = tomllib.load(f) + with open(os.path.join(basedir, ".codemeta", "codemeta_base.json"), "r", encoding="utf-8") as f: + codemeta = json.load(f) + with open(os.path.join(basedir, "AUTHORS"), "r", encoding="utf-8") as f: + authors = f.read().splitlines()[6:] + + # Add software requirements from pyproject.toml + parsed_deps = pkg_resources.parse_requirements(pyproject['project']['dependencies']) + codemeta["softwareRequirements"] = [] + for dep in parsed_deps: + version = ",".join(f"{op}{v}" for op,v in dep.specs) + requirement = {"name": dep.project_name,"@type": "SoftwareApplication", "runtimePlatform": "Python 3"} + if version: + requirement["version"] = version + codemeta["softwareRequirements"].append(requirement) + + # Add contributors from AUTHORS + codemeta["contributor"] = [] + for author in authors: + matches = re.match(r"^(\w[\w-]*?) ([\w]+[.]? 
)??(\w+) \(@(.*)\)$", author) + if not matches: + raise ValueError("author not matched:", author) + given_name, middle_name, family_name, github = matches.groups() + + contributor = {"@type": "Person", "givenName": given_name, "familyName": family_name, "identifier": f"https://github.com/{github}"} + # FIXME: additionalName does not seem to be recognized by codemeta (validation fails) + if middle_name: + contributor["givenName"] += " " + middle_name.strip() + codemeta["contributor"].append(contributor) + + # Add version from setuptools_scm + version = sys.argv[1] + codemeta["version"] = version + codemeta["softwareVersion"] = version + + # Write codemeta.json + with open(os.path.join(basedir, "codemeta.json"), "w", encoding="utf-8") as f: + json.dump(codemeta, f, indent=2, ensure_ascii=False) diff --git a/.github/workflows/post_release_updates.yml b/.github/workflows/post_release_updates.yml new file mode 100644 index 000000000..f680f5305 --- /dev/null +++ b/.github/workflows/post_release_updates.yml @@ -0,0 +1,38 @@ +name: Update metadata after release + +on: workflow_dispatch + +jobs: + build: + runs-on: ubuntu-latest + permissions: + # Give the default GITHUB_TOKEN write permission to commit and push the + # added or changed files to the repository. 
+ contents: write + pull-requests: write + steps: + - uses: actions/checkout@v4 + with: + persist-credentials: true # Needed to push changes back to the repository + fetch-depth: 0 + fetch-tags: true + - name: Install Python + uses: actions/setup-python@v5 + with: + python-version: 3.12 + - name: Install dependencies + run: python -m pip install cffconvert pyaml ruamel.yaml requests + - name: Update metadata + run: python dev/continuous-integration/update_zenodo_swh.py + - name: Verify CITATION.cff + run: cffconvert --validate + - name: Create Pull Request + uses: peter-evans/create-pull-request@v7 + with: + delete-branch: true + branch: update-metadata-post-release + title: Update CITATION.cff and README.md with metadata from Zenodo/SWH + commit-message: | + Update CITATION.cff and README.md with metadata from Zenodo/SWH + + [ci skip] \ No newline at end of file diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index e032eaa6a..00009e5e8 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -22,11 +22,12 @@ jobs: strategy: fail-fast: false matrix: - os: [ windows-latest, macOS-12, macOS-14 ] + os: [ windows-latest, macOS-13, macOS-14 ] steps: - uses: actions/checkout@v4 with: fetch-depth: 0 + persist-credentials: false - name: Set up Python 3.x uses: actions/setup-python@v5 with: @@ -48,7 +49,7 @@ jobs: path: ${{ steps.cibuildwheel-cache.outputs.dir }} key: ${{ runner.os }}-cibuildwheel - name: Build wheels - uses: pypa/cibuildwheel@v2.19.2 + uses: pypa/cibuildwheel@v2.22.0 env: CIBW_PROJECT_REQUIRES_PYTHON: ">=${{ needs.get_python_versions.outputs.min-python }}" CIBW_ARCHS: auto64 @@ -76,6 +77,7 @@ jobs: - uses: actions/checkout@v4 with: fetch-depth: 0 + persist-credentials: false - name: Set up Python 3.x uses: actions/setup-python@v5 with: @@ -106,7 +108,7 @@ jobs: path: ${{ steps.cibuildwheel-cache.outputs.dir }} key: ${{ runner.os }}-${{ matrix.arch }}-cibuildwheel - name: Build wheels - uses: 
pypa/cibuildwheel@v2.19.2 + uses: pypa/cibuildwheel@v2.22.0 env: CIBW_PROJECT_REQUIRES_PYTHON: ">=${{ needs.get_python_versions.outputs.min-python }}" CIBW_ARCHS_LINUX: ${{ matrix.arch }} @@ -168,9 +170,13 @@ jobs: name: Build docker image runs-on: ubuntu-latest needs: build-linux + # Skip everything for PR authors that do not have permission to access secrets + if: ${{ github.event.pull_request.author_association != 'COLLABORATOR' && github.event.pull_request.author_association != 'OWNER' }} steps: - name: Checkout repository uses: actions/checkout@v4 + with: + persist-credentials: false # https://github.com/actions/checkout/ - name: Docker meta id: meta @@ -179,9 +185,9 @@ jobs: with: images: | briansimulator/brian + ghcr.io/brian-team/brian flavor: latest=true tags: | - # type=semver,pattern={{raw}} type=ref,event=tag labels: | org.opencontainers.image.title="Brian Docker Image" @@ -203,6 +209,13 @@ jobs: with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Login to GitHub Container Registry + if: ${{ github.repository == 'brian-team/brian2' && github.actor != 'dependabot[bot]'}} + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} - name: load Linux x86 distribution 📦 uses: actions/download-artifact@v4 with: @@ -235,5 +248,7 @@ jobs: 'BASE_IMAGE_TAG=3.12-bookworm' platforms: 'amd64,arm64' push: true - tags: briansimulator/brian-dev:dev-${{ github.ref_name }} - labels: ${{ steps.meta.outputs.labels }} + tags: | + briansimulator/brian-dev:dev-${{ github.ref_name }} + ghcr.io/brian-team/brian-dev:dev-${{ github.ref_name }} + labels: ${{ steps.meta.outputs.labels }} diff --git a/.github/workflows/static_analysis.yml b/.github/workflows/static_analysis.yml new file mode 100644 index 000000000..60006e8a3 --- /dev/null +++ b/.github/workflows/static_analysis.yml @@ -0,0 +1,29 @@ +name: " GitHub Actions Security Analysis with Zizmor" 
+ +# The scheduled workflow runs every Sunday at 23:45 UTC. +on: + push: + schedule: + - cron: '45 23 * * 0' + +jobs: + build: + runs-on: ubuntu-latest + permissions: + security-events: write + steps: + - uses: actions/checkout@v4 + with: + persist-credentials: false + - name: Setup Rust + uses: actions-rust-lang/setup-rust-toolchain@v1 + - name: Get zizmor + run: cargo install zizmor + # https://github.com/woodruffw/zizmor + - name: Run zizmor + run: zizmor --format sarif . > results.sarif + - name: Upload SARIF file + uses: github/codeql-action/upload-sarif@v3 + with: + sarif_file: results.sarif + category: zizmor diff --git a/.github/workflows/test_latest.yml b/.github/workflows/test_latest.yml index 1c859ed61..135c9fb6c 100644 --- a/.github/workflows/test_latest.yml +++ b/.github/workflows/test_latest.yml @@ -30,8 +30,8 @@ jobs: matrix: os: [{image: ubuntu-latest, triplet: x64-linux}, {image: windows-latest, triplet: x64-windows}, - {image: macOS-12, triplet: x64-osx}, - {image: macOS-14, triplet: arm64-osx}] + {image: macOS-13, triplet: x64-osx}, + {image: macOS-latest, triplet: arm64-osx}] standalone: [false, true] float_dtype_32: [false, true] python-version: ["${{ needs.get_python_versions.outputs.max-python }}"] @@ -41,6 +41,7 @@ jobs: uses: actions/checkout@v4 with: fetch-depth: 0 + persist-credentials: false - name: Install GSL uses: johnwason/vcpkg-action@v6 id: vcpkg @@ -65,24 +66,32 @@ jobs: python-version: ${{ matrix.python-version }} allow-prereleases: true - name: Install dependencies + shell: bash + env: + PYTHON_BINARY: ${{ steps.python.outputs.python-path }} run: | - ${{ steps.python.outputs.python-path }} -m pip install --upgrade pip setuptools - ${{ steps.python.outputs.python-path }} -m pip install --pre -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple numpy scipy - ${{ steps.python.outputs.python-path }} -m pip install mpmath # install stable version - ${{ steps.python.outputs.python-path }} -m pip install --pre pytest 
pytest-xdist pytest-cov pytest-timeout cython sympy pyparsing jinja2 sphinx + "$PYTHON_BINARY" -m pip install --upgrade pip setuptools + "$PYTHON_BINARY" -m pip install --pre -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple numpy scipy + "$PYTHON_BINARY" -m pip install mpmath # install stable version + "$PYTHON_BINARY" -m pip install --pre pytest pytest-xdist pytest-cov pytest-timeout cython sympy pyparsing jinja2 sphinx - name: Install Brian2 + shell: bash + env: + PYTHON_BINARY: ${{ steps.python.outputs.python-path }} run: | cp numpy2.pyproject.toml pyproject.toml - ${{ steps.python.outputs.python-path }} -m pip install --pre -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple --extra-index-url https://pypi.org/simple . + "$PYTHON_BINARY" -m pip install --pre -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple --extra-index-url https://pypi.org/simple . - name: Run Tests + shell: bash run: | - cd ${{ github.workspace }}/.. # move out of the workspace to avoid direct import - ${{ steps.python.outputs.python-path }} -Wd ${{ github.workspace }}/dev/continuous-integration/run_test_suite.py + cd "$GITHUB_WORKSPACE"/.. 
# move out of the workspace to avoid direct import + "$PYTHON_BINARY" -Wd "$GITHUB_WORKSPACE/dev/continuous-integration/run_test_suite.py" env: AGENT_OS: ${{runner.os}} STANDALONE: ${{ matrix.standalone }} FLOAT_DTYPE_32: ${{ matrix.float_dtype_32 }} DO_NOT_RESET_PREFERENCES: true # Make sure that GSL setting is used + PYTHON_BINARY: ${{ steps.python.outputs.python-path }} test-deprecations: needs: [ get_python_versions ] @@ -99,6 +108,7 @@ jobs: uses: actions/checkout@v4 with: fetch-depth: 0 + persist-credentials: false - name: Install GSL uses: johnwason/vcpkg-action@v6 id: vcpkg @@ -116,16 +126,21 @@ jobs: python-version: ${{ matrix.python-version }} allow-prereleases: true - name: Install dependencies + env: + PYTHON_BINARY: ${{ steps.python.outputs.python-path }} run: | - ${{ steps.python.outputs.python-path }} -m pip install --upgrade pip setuptools - ${{ steps.python.outputs.python-path }} -m pip install --pre -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple numpy scipy - ${{ steps.python.outputs.python-path }} -m pip install --pre pytest cython sympy pyparsing jinja2 sphinx + "$PYTHON_BINARY" -m pip install --upgrade pip setuptools + "$PYTHON_BINARY" -m pip install --pre -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple numpy scipy + "$PYTHON_BINARY" -m pip install --pre pytest cython sympy pyparsing jinja2 sphinx - name: Install Brian2 - run: ${{ steps.python.outputs.python-path }} -m pip install --pre -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple --extra-index-url https://pypi.org/simple . + env: + PYTHON_BINARY: ${{ steps.python.outputs.python-path }} + run: | + "$PYTHON_BINARY" -m pip install --pre -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple --extra-index-url https://pypi.org/simple . 
- name: Determine Cython cache dir id: cython-cache run: | - CACHE_DIR=$(python -c 'import os; from Cython.Utils import get_cython_cache_dir; print(os.path.join(get_cython_cache_dir(), "brian_extensions"))') + CACHE_DIR=$(python -c 'from brian2.codegen.runtime.cython_rt.extension_manager import get_cython_cache_dir; print(get_cython_cache_dir())') echo "Cython cache dir: $CACHE_DIR" echo "cachedir=$CACHE_DIR" >> "$GITHUB_OUTPUT" - name: restore Cython cache @@ -137,8 +152,9 @@ jobs: - name: Run Tests run: | cd ${{ github.workspace }}/.. # move out of the workspace to avoid direct import - ${{ steps.python.outputs.python-path }} -Wd ${{ github.workspace }}/dev/continuous-integration/run_test_suite.py || echo "Tests failed (but not marked as failed on GA)" + "$PYTHON_BINARY" -Wd ${{ github.workspace }}/dev/continuous-integration/run_test_suite.py || echo "Tests failed (but not marked as failed on GA)" env: DEPRECATION_ERROR: true AGENT_OS: linux STANDALONE: ${{ matrix.standalone }} + PYTHON_BINARY: ${{ steps.python.outputs.python-path }} diff --git a/.github/workflows/testsuite.yml b/.github/workflows/testsuite.yml index 4b9a20eb8..77d876cfa 100644 --- a/.github/workflows/testsuite.yml +++ b/.github/workflows/testsuite.yml @@ -21,6 +21,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + with: + persist-credentials: false - uses: actions/setup-python@v5 with: python-version: '3.10' @@ -38,7 +40,7 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-22.04, windows-2022, macOS-12, macOS-14] + os: [ubuntu-22.04, windows-2022, macOS-13, macOS-14] standalone: [false, true] float_dtype_32: [false, true] python-version: ["${{ needs.get_python_versions.outputs.max-python }}"] @@ -60,6 +62,7 @@ jobs: uses: actions/checkout@v4 with: fetch-depth: 0 + persist-credentials: false - name: Setup Conda and Python uses: conda-incubator/setup-miniconda@v3 @@ -102,7 +105,7 @@ jobs: FLOAT_DTYPE_32: ${{ matrix.float_dtype_32 }} - name: Send coverage to Coveralls 
(parallel) if: ${{ startsWith(matrix.os, 'ubuntu-') && matrix.python-version == needs.get_python_versions.outputs.max-python }} - uses: coverallsapp/github-action@v2.3.0 + uses: coverallsapp/github-action@v2.3.4 with: parallel: true flag-name: run ${{ join(matrix.*, ' - ') }} @@ -113,7 +116,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Close parallel build - uses: coverallsapp/github-action@v2.3.0 + uses: coverallsapp/github-action@v2.3.4 with: parallel-finished: true @@ -129,6 +132,7 @@ jobs: uses: actions/checkout@v4 with: fetch-depth: 0 + persist-credentials: false submodules: true - name: Setup Conda and Python diff --git a/CITATION.cff b/CITATION.cff index 24373624a..3c8afcb1a 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -1,51 +1,60 @@ cff-version: 1.2.0 + message: If you use this software, please cite it using these metadata. + title: Brian simulator + abstract: A clock-driven simulator for spiking neural networks + authors: - family-names: Stimberg given-names: Marcel - orcid: "https://orcid.org/0000-0002-2648-4790" + orcid: https://orcid.org/0000-0002-2648-4790 - family-names: Goodman given-names: Dan F. M. 
- orcid: "https://orcid.org/0000-0003-1007-6474" + orcid: https://orcid.org/0000-0003-1007-6474 - family-names: Evans given-names: Benjmain - orcid: "https://orcid.org/0000-0002-1734-6070" + orcid: https://orcid.org/0000-0002-1734-6070 - family-names: Brette given-names: Romain - orcid: "https://orcid.org/0000-0003-0110-1623" - - name: "Brian contributors" + orcid: https://orcid.org/0000-0003-0110-1623 + - name: Brian contributors + version: 2.6.0 -date-released: "2024-03-15" + +date-released: '2024-03-15' + identifiers: - description: This is the collection of archived snapshots of all versions of Brian 2 type: doi - value: "10.5281/zenodo.654861" - - description: This is the archived snapshot of version 2.6.0 of Brian 2 + value: 10.5281/zenodo.654861 + - description: This is the archived snapshot of version 2.7.1 of Brian 2 type: doi - value: "10.5281/zenodo.10822565" - - description: Software heritage identifier for version 2.6.0 + value: 10.5281/zenodo.14283067 + - description: Software Heritage identifier for version 2.7.1 of Brian 2 type: swh - value: "swh:1:rel:2d4c5c8c8a6d2318332889df93ab74aef53e2c61" + value: swh:1:rel:7c128994b35bd756b4d248240513e4d1a934a0c0 + license: CECILL-2.1 -repository-code: "https://github.com/brian-team/brian2" -preferred-citation: - authors: - - family-names: Stimberg - given-names: Marcel - orcid: "https://orcid.org/0000-0002-2648-4790" - - family-names: Goodman - given-names: Dan F. M. - orcid: "https://orcid.org/0000-0003-1007-6474" - - family-names: Brette - given-names: Romain - orcid: "https://orcid.org/0000-0003-0110-1623" - title: "Brian 2, an intuitive and efficient neural simulator" - journal: eLife - month: 8 - year: 2019 - volume: 8 - doi: "10.7554/eLife.47314" - type: article +repository-code: https://github.com/brian-team/brian2 + +preferred-citation: + authors: + - family-names: Stimberg + given-names: Marcel + orcid: https://orcid.org/0000-0002-2648-4790 + - family-names: Goodman + given-names: Dan F. M. 
+ orcid: https://orcid.org/0000-0003-1007-6474 + - family-names: Brette + given-names: Romain + orcid: https://orcid.org/0000-0003-0110-1623 + title: Brian 2, an intuitive and efficient neural simulator + journal: eLife + month: 8 + year: 2019 + volume: 8 + doi: 10.7554/eLife.47314 + type: article diff --git a/LICENSE b/LICENSE index c06e706f5..0534996dd 100644 --- a/LICENSE +++ b/LICENSE @@ -659,10 +659,10 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- +-- -The Mersenne-Twister implementation "Randomkit" in brian2.random.randomkit is -based on code by Jean-Sebastien Roy, provided under the MIT license: +The `rand()` and `randn()` implementations are based on code by +Jean-Sebastien Roy, provided under the MIT license: Copyright (c) 2003-2005, Jean-Sebastien Roy (js@jeannot.org) @@ -687,47 +687,6 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -- -The rk_random and rk_seed functions algorithms and the original design of -the Mersenne Twister RNG: - -Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura, -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - -1. Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. - -3. The names of its contributors may not be used to endorse or promote -products derived from this software without specific prior written -permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --- - -Original algorithm for the implementation of rk_interval function from -Richard J. Wagner's implementation of the Mersenne Twister RNG, optimised by -Magnus Jonsson. - --- - Constants used in the rk_double implementation by Isaku Wada. 
-- diff --git a/README.md b/README.md index 89de6d90e..7ca43ea43 100644 --- a/README.md +++ b/README.md @@ -23,9 +23,9 @@ If you use Brian for your published research, we kindly ask you to cite our arti [![AUR version](https://img.shields.io/aur/version/python-brian2)](https://aur.archlinux.org/packages/python-brian2) [![Docker Pulls](https://img.shields.io/docker/pulls/briansimulator/brian)](https://hub.docker.com/r/briansimulator/brian) -[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.654861.svg)](https://zenodo.org/doi/10.5281/zenodo.654861) +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.14283067.svg)](https://zenodo.org/doi/10.5281/zenodo.14283067) [![Software Heritage (repository)](https://archive.softwareheritage.org/badge/origin/https://github.com/brian-team/brian2/)](https://archive.softwareheritage.org/browse/origin/?origin_url=https://github.com/brian-team/brian2) -[![Software Heritage (release)](https://archive.softwareheritage.org/badge/swh:1:rel:2d4c5c8c8a6d2318332889df93ab74aef53e2c61/)](https://archive.softwareheritage.org/swh:1:rel:2d4c5c8c8a6d2318332889df93ab74aef53e2c61;origin=https://github.com/brian-team/brian2;visit=swh:1:snp:a90ab7416901a9c5cf6f56d68b3455c65d322afc) +[![Software Heritage (release)](https://archive.softwareheritage.org/badge/swh:1:rel:7c128994b35bd756b4d248240513e4d1a934a0c0/)](https://archive.softwareheritage.org/swh:1:rel:7c128994b35bd756b4d248240513e4d1a934a0c0;origin=https://github.com/brian-team/brian2;visit=swh:1:snp:9ba8406b05353301216ff7a0adb097a9297c62b3) [![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-v1.4%20adopted-ff69b4.svg)](CODE_OF_CONDUCT.md) [![Discourse topics](https://img.shields.io/discourse/topics?server=https%3A%2F%2Fbrian.discourse.group)](https://brian.discourse.group) diff --git a/brian2/codegen/cpp_prefs.py b/brian2/codegen/cpp_prefs.py index 653dd777b..8045ee01d 100644 --- a/brian2/codegen/cpp_prefs.py +++ b/brian2/codegen/cpp_prefs.py @@ -17,9 +17,13 @@ import 
subprocess import sys import tempfile -from distutils.ccompiler import get_default_compiler -from setuptools import msvc +try: + from setuptools.msvc import msvc14_get_vc_env as _get_vc_env +except ImportError: # Setuptools 0.74.0 removed this function + from distutils._msvccompiler import _get_vc_env + +from distutils.ccompiler import get_default_compiler from brian2.core.preferences import BrianPreference, prefs from brian2.utils.filetools import ensure_directory @@ -352,7 +356,7 @@ def get_msvc_env(): # Search for MSVC environment if not already cached if _msvc_env is None: try: - _msvc_env = msvc.msvc14_get_vc_env(arch_name) + _msvc_env = _get_vc_env(arch_name) except distutils.errors.DistutilsPlatformError: raise OSError( "Cannot find Microsoft Visual Studio, You " diff --git a/brian2/codegen/optimisation.py b/brian2/codegen/optimisation.py index 73fa90966..36505c70f 100644 --- a/brian2/codegen/optimisation.py +++ b/brian2/codegen/optimisation.py @@ -444,11 +444,11 @@ def reduced_node(terms, op): -------- >>> import ast >>> nodes = [ast.Name(id='x'), ast.Name(id='y'), ast.Name(id='z')] - >>> ast.dump(reduced_node(nodes, ast.Mult), annotate_fields=False) - "BinOp(BinOp(Name('x'), Mult(), Name('y')), Mult(), Name('z'))" + >>> ast.unparse(reduced_node(nodes, ast.Mult)) + 'x * y * z' >>> nodes = [ast.Name(id='x')] - >>> ast.dump(reduced_node(nodes, ast.Add), annotate_fields=False) - "Name('x')" + >>> ast.unparse(reduced_node(nodes, ast.Add)) + 'x' """ # Remove None terms terms = [term for term in terms if term is not None] diff --git a/brian2/codegen/runtime/cython_rt/extension_manager.py b/brian2/codegen/runtime/cython_rt/extension_manager.py index b2a56b2d2..e9d0b22b0 100644 --- a/brian2/codegen/runtime/cython_rt/extension_manager.py +++ b/brian2/codegen/runtime/cython_rt/extension_manager.py @@ -21,7 +21,13 @@ import Cython import Cython.Build as Cython_Build import Cython.Compiler as Cython_Compiler - from Cython.Utils import get_cython_cache_dir as 
base_cython_cache_dir + + try: + from Cython.Utils import get_cython_cache_dir as base_cython_cache_dir + except ImportError: + from Cython.Build.Cache import ( + get_cython_cache_dir as base_cython_cache_dir, # Cython 3.1, see cython/cython#6090 + ) except ImportError: Cython = None diff --git a/brian2/conftest.py b/brian2/conftest.py index 1c6f25949..fa1b0fbf7 100644 --- a/brian2/conftest.py +++ b/brian2/conftest.py @@ -14,12 +14,12 @@ from brian2.units import ms -def pytest_ignore_collect(path, config): +def pytest_ignore_collect(collection_path, config): if config.option.doctestmodules: - if "tests" in str(path): + if "tests" in collection_path.parts: return True # Ignore tests package for doctests # Do not test brian2.hears bridge (needs Brian1) - if str(path).endswith("hears.py"): + if collection_path.name == "hears.py": return True diff --git a/brian2/core/variables.py b/brian2/core/variables.py index d52f2656c..f5e5e7f91 100644 --- a/brian2/core/variables.py +++ b/brian2/core/variables.py @@ -1574,7 +1574,15 @@ def __hash__(self): @property def shape(self): if self.ndim == 1: - return (self.variable.size,) + if not self.variable.scalar: + # This is safer than using the variable size, since it also works for subgroups + # see GitHub issue #1555 + size = self.group.stop - self.group.start + assert size <= self.variable.size + else: + size = self.variable.size + + return (size,) else: return self.variable.size diff --git a/brian2/devices/cpp_standalone/codeobject.py b/brian2/devices/cpp_standalone/codeobject.py index 9778c538d..50fdec9cb 100644 --- a/brian2/devices/cpp_standalone/codeobject.py +++ b/brian2/devices/cpp_standalone/codeobject.py @@ -138,15 +138,11 @@ def generate_rand_code(rand_func, owner): thread_number = "0" else: thread_number = "omp_get_thread_num()" - if rand_func == "rand": - rk_call = "rk_double" - elif rand_func == "randn": - rk_call = "rk_gauss" - else: + if rand_func not in ["rand", "randn"]: raise AssertionError(rand_func) code = """ 
- double _%RAND_FUNC%(const int _vectorisation_idx) { - return %RK_CALL%(brian::_mersenne_twister_states[%THREAD_NUMBER%]); + inline double _%RAND_FUNC%(const int _vectorisation_idx) { + return brian::_random_generators[%THREAD_NUMBER%].%RAND_FUNC%(); } """ code = replace( @@ -154,7 +150,6 @@ def generate_rand_code(rand_func, owner): { "%THREAD_NUMBER%": thread_number, "%RAND_FUNC%": rand_func, - "%RK_CALL%": rk_call, }, ) return {"support_code": code} diff --git a/brian2/devices/cpp_standalone/device.py b/brian2/devices/cpp_standalone/device.py index 7cb100b98..729385f74 100644 --- a/brian2/devices/cpp_standalone/device.py +++ b/brian2/devices/cpp_standalone/device.py @@ -19,7 +19,6 @@ import numpy as np -import brian2 from brian2.codegen.codeobject import check_compiler_kwds from brian2.codegen.cpp_prefs import get_compiler_and_args, get_msvc_env from brian2.codegen.generators.cpp_generator import c_data_type @@ -198,6 +197,9 @@ def __init__(self): #: Dict of all static saved arrays self.static_arrays = {} + #: Names of static arrays used for run_args given as lists of values + self.run_args_arrays = [] + #: Dict of all TimedArray objects self.timed_arrays = {} @@ -228,8 +230,8 @@ def __init__(self): self.extra_compile_args = [] self.define_macros = [] self.headers = [] - self.include_dirs = ["brianlib/randomkit"] - self.library_dirs = ["brianlib/randomkit"] + self.include_dirs = [] + self.library_dirs = [] self.runtime_library_dirs = [] self.run_environment_variables = {} if sys.platform.startswith("darwin"): @@ -873,13 +875,10 @@ def generate_main_source(self, writer): nb_threads = 1 main_lines.append(f"for (int _i=0; _i<{nb_threads}; _i++)") if seed is None: # random - main_lines.append( - " rk_randomseed(brian::_mersenne_twister_states[_i]);" - ) + main_lines.append(" brian::_random_generators[_i].seed();") else: main_lines.append( - f" rk_seed({seed!r}L + _i," - " brian::_mersenne_twister_states[_i]);" + f" brian::_random_generators[_i].seed({seed!r}L + 
_i);" ) else: raise NotImplementedError(f"Unknown main queue function type {func}") @@ -1084,28 +1083,6 @@ def copy_source_files(self, writer, directory): os.path.join(directory, "brianlib", "stdint_compat.h"), ) - # Copy the RandomKit implementation - if not os.path.exists(os.path.join(directory, "brianlib", "randomkit")): - os.mkdir(os.path.join(directory, "brianlib", "randomkit")) - shutil.copy2( - os.path.join( - os.path.split(inspect.getsourcefile(brian2))[0], - "random", - "randomkit", - "randomkit.c", - ), - os.path.join(directory, "brianlib", "randomkit", "randomkit.c"), - ) - shutil.copy2( - os.path.join( - os.path.split(inspect.getsourcefile(brian2))[0], - "random", - "randomkit", - "randomkit.h", - ), - os.path.join(directory, "brianlib", "randomkit", "randomkit.h"), - ) - def _insert_func_namespace(self, func, code_object, namespace): impl = func.implementations[self.code_object_class()] func_namespace = impl.get_namespace(code_object.owner) @@ -1174,12 +1151,7 @@ def compile_source(self, directory, compiler, debug, clean): self.timers["compile"]["make"] = time.time() - start_time if x != 0: - if os.path.exists("winmake.log"): - with open("winmake.log") as f: - print(f.read()) - error_message = ( - "Project compilation failed (error code: %u)." % x - ) + error_message = f"Project compilation failed (error code: {x}), consider having a look at 'winmake.log'." 
if not clean: error_message += ( " Consider running with " @@ -1243,6 +1215,7 @@ def run( ) ensure_directory(self.results_dir) + self.run_args_arrays.clear() # forget about arrays from previous runs if run_args is None: run_args = [] elif isinstance(run_args, Mapping): @@ -1274,6 +1247,7 @@ def run( with FileLock(fname + ".lock"): if not os.path.exists(fname): value_ar.tofile(fname) + self.run_args_arrays.append(value_name) list_rep.append(f"{name}={string_value}") run_args = list_rep @@ -1559,10 +1533,20 @@ def build( libraries = self.libraries + prefs["codegen.cpp.libraries"] + codeobj_libraries compiler_obj = ccompiler.new_compiler(compiler=compiler) + + # Distutils does not use the shell, so it does not need to quote filenames/paths + # Since we include the compiler flags in the makefile, we need to quote them + include_dirs = [f'"{include_dir}"' for include_dir in include_dirs] + library_dirs = [f'"{library_dir}"' for library_dir in library_dirs] + runtime_library_dirs = [ + f'"{runtime_dir}"' for runtime_dir in runtime_library_dirs + ] + compiler_flags = ( ccompiler.gen_preprocess_options(define_macros, include_dirs) + extra_compile_args ) + linker_flags = ( ccompiler.gen_lib_options( compiler_obj, @@ -1578,9 +1562,7 @@ def build( for codeobj in self.code_objects.values() for source_file in codeobj.compiler_kwds.get("sources", []) ] - additional_source_files += codeobj_source_files + [ - "brianlib/randomkit/randomkit.c" - ] + additional_source_files += codeobj_source_files for d in ["code_objects", "results", "static_arrays"]: ensure_directory(os.path.join(directory, d)) @@ -1655,11 +1637,11 @@ def build( ] logger.debug(f"Time measurements: {', '.join(logged_times)}") - def delete(self, code=True, data=True, directory=True, force=False): + def delete(self, code=True, data=True, run_args=True, directory=True, force=False): if self.project_dir is None: return # Nothing to delete - if directory and not (code and data): + if directory and not all([code, data, 
run_args]): raise ValueError( "When deleting the directory, code and data will" "be deleted as well. Set the corresponding " @@ -1698,7 +1680,6 @@ def delete(self, code=True, data=True, directory=True, force=False): fnames.extend( [ os.path.join("brianlib", "spikequeue.h"), - os.path.join("brianlib", "randomkit", "randomkit.h"), os.path.join("brianlib", "stdint_compat.h"), ] ) @@ -1715,18 +1696,21 @@ def delete(self, code=True, data=True, directory=True, force=False): for static_array_name in self.static_arrays: fnames.append(os.path.join("static_arrays", static_array_name)) + if run_args: + for fname in self.run_args_arrays: + fnames.append(os.path.join("static_arrays", fname)) + for fname in fnames: full_fname = os.path.join(self.project_dir, fname) try: os.remove(full_fname) except OSError as ex: - logger.debug(f'File "{full_fname}" could not be deleted: {str(ex)}') + logger.warn(f'File "{full_fname}" could not be deleted: {str(ex)}') # Delete directories if directory: directories = [ - os.path.join("brianlib", "randomkit"), "brianlib", "code_objects", "results", diff --git a/brian2/devices/cpp_standalone/templates/main.cpp b/brian2/devices/cpp_standalone/templates/main.cpp index 5758fff70..ba57df813 100644 --- a/brian2/devices/cpp_standalone/templates/main.cpp +++ b/brian2/devices/cpp_standalone/templates/main.cpp @@ -5,7 +5,6 @@ {{ openmp_pragma('include') }} #include "run.h" #include "brianlib/common_math.h" -#include "randomkit.h" {% for codeobj in code_objects | sort(attribute='name') %} #include "code_objects/{{codeobj.name}}.h" @@ -36,6 +35,7 @@ void set_from_command_line(const std::vector args) } int main(int argc, char **argv) { + std::random_device _rd; std::vector args(argv + 1, argv + argc); if (args.size() >=2 && args[0] == "--results_dir") { diff --git a/brian2/devices/cpp_standalone/templates/objects.cpp b/brian2/devices/cpp_standalone/templates/objects.cpp index 166caab29..fab6bf3a0 100644 --- a/brian2/devices/cpp_standalone/templates/objects.cpp 
+++ b/brian2/devices/cpp_standalone/templates/objects.cpp @@ -20,7 +20,7 @@ set_variable_from_value(name, {{array_name}}, var_size, (char)atoi(s_value.c_str #include "brianlib/dynamic_array.h" #include "brianlib/stdint_compat.h" #include "network.h" -#include "randomkit.h" +#include #include #include #include @@ -32,7 +32,11 @@ set_variable_from_value(name, {{array_name}}, var_size, (char)atoi(s_value.c_str namespace brian { std::string results_dir = "results/"; // can be overwritten by --results_dir command line arg -std::vector< rk_state* > _mersenne_twister_states; + +// For multhreading, we need one generator for each thread. We also create a distribution for +// each thread, even though this is not strictly necessary for the uniform distribution, as +// the distribution is stateless. +std::vector< RandomGenerator > _random_generators; //////////////// networks ///////////////// {% for net in networks | sort(attribute='name') %} @@ -76,15 +80,15 @@ template void set_variable_from_file(std::string varname, T* var_pointe //////////////// set arrays by name /////// void set_variable_by_name(std::string name, std::string s_value) { - size_t var_size; - size_t data_size; - // C-style or Python-style capitalization is allowed for boolean values + size_t var_size; + size_t data_size; + // C-style or Python-style capitalization is allowed for boolean values if (s_value == "true" || s_value == "True") s_value = "1"; else if (s_value == "false" || s_value == "False") s_value = "0"; - // non-dynamic arrays - {% for var, varname in array_specs | dictsort(by='value') %} + // non-dynamic arrays + {% for var, varname in array_specs | dictsort(by='value') %} {% if not var in dynamic_array_specs and not var.read_only %} if (name == "{{var.owner.name}}.{{var.name}}") { var_size = {{var.size}}; @@ -167,8 +171,8 @@ const int _num_{{name}} = {{N}}; // {{S.name}} {% for path in S._pathways | sort(attribute='name') %} SynapticPathway {{path.name}}( - 
{{dynamic_array_specs[path.synapse_sources]}}, - {{path.source.start}}, {{path.source.stop}}); + {{dynamic_array_specs[path.synapse_sources]}}, + {{path.source.start}}, {{path.source.stop}}); {% endfor %} {% endfor %} @@ -187,107 +191,108 @@ double {{codeobj}}_profiling_info = 0.0; void _init_arrays() { - using namespace brian; + using namespace brian; // Arrays initialized to 0 - {% for var, varname in zero_arrays | sort(attribute='1') %} - {% if varname in dynamic_array_specs.values() %} - {{varname}}.resize({{var.size}}); - {% else %} - {{varname}} = new {{c_data_type(var.dtype)}}[{{var.size}}]; - {% endif %} + {% for var, varname in zero_arrays | sort(attribute='1') %} + {% if varname in dynamic_array_specs.values() %} + {{varname}}.resize({{var.size}}); + {% else %} + {{varname}} = new {{c_data_type(var.dtype)}}[{{var.size}}]; + {% endif %} {{ openmp_pragma('parallel-static')}} - for(int i=0; i<{{var.size}}; i++) {{varname}}[i] = 0; + for(int i=0; i<{{var.size}}; i++) {{varname}}[i] = 0; - {% endfor %} + {% endfor %} - // Arrays initialized to an "arange" - {% for var, varname, start in arange_arrays | sort(attribute='1')%} - {% if varname in dynamic_array_specs.values() %} - {{varname}}.resize({{var.size}}); - {% else %} - {{varname}} = new {{c_data_type(var.dtype)}}[{{var.size}}]; - {% endif %} + // Arrays initialized to an "arange" + {% for var, varname, start in arange_arrays | sort(attribute='1')%} + {% if varname in dynamic_array_specs.values() %} + {{varname}}.resize({{var.size}}); + {% else %} + {{varname}} = new {{c_data_type(var.dtype)}}[{{var.size}}]; + {% endif %} {{ openmp_pragma('parallel-static')}} - for(int i=0; i<{{var.size}}; i++) {{varname}}[i] = {{start}} + i; - - {% endfor %} - - // static arrays - {% for (name, dtype_spec, N, filename) in static_array_specs | sort %} - {% if name in dynamic_array_specs.values() %} - {{name}}.resize({{N}}); - {% else %} - {{name}} = new {{dtype_spec}}[{{N}}]; - {% endif %} - {% endfor %} - - // Random 
number generator states - for (int i=0; i<{{openmp_pragma('get_num_threads')}}; i++) - _mersenne_twister_states.push_back(new rk_state()); + for(int i=0; i<{{var.size}}; i++) {{varname}}[i] = {{start}} + i; + + {% endfor %} + + // static arrays + {% for (name, dtype_spec, N, filename) in static_array_specs | sort %} + {% if name in dynamic_array_specs.values() %} + {{name}}.resize({{N}}); + {% else %} + {{name}} = new {{dtype_spec}}[{{N}}]; + {% endif %} + {% endfor %} + + // Random number generator states + std::random_device rd; + for (int i=0; i<{{openmp_pragma('get_num_threads')}}; i++) + _random_generators.push_back(RandomGenerator()); } void _load_arrays() { - using namespace brian; - - {% for (name, dtype_spec, N, filename) in static_array_specs | sort %} - ifstream f{{name}}; - f{{name}}.open("static_arrays/{{name}}", ios::in | ios::binary); - if(f{{name}}.is_open()) - { - {% if name in dynamic_array_specs.values() %} - f{{name}}.read(reinterpret_cast(&{{name}}[0]), {{N}}*sizeof({{dtype_spec}})); - {% else %} - f{{name}}.read(reinterpret_cast({{name}}), {{N}}*sizeof({{dtype_spec}})); - {% endif %} - } else - { - std::cout << "Error opening static array {{name}}." << endl; - } - {% endfor %} + using namespace brian; + + {% for (name, dtype_spec, N, filename) in static_array_specs | sort %} + ifstream f{{name}}; + f{{name}}.open("static_arrays/{{name}}", ios::in | ios::binary); + if(f{{name}}.is_open()) + { + {% if name in dynamic_array_specs.values() %} + f{{name}}.read(reinterpret_cast(&{{name}}[0]), {{N}}*sizeof({{dtype_spec}})); + {% else %} + f{{name}}.read(reinterpret_cast({{name}}), {{N}}*sizeof({{dtype_spec}})); + {% endif %} + } else + { + std::cout << "Error opening static array {{name}}." 
<< endl; + } + {% endfor %} } void _write_arrays() { - using namespace brian; - - {% for var, varname in array_specs | dictsort(by='value') %} - {% if not (var in dynamic_array_specs or var in dynamic_array_2d_specs) %} - ofstream outfile_{{varname}}; - outfile_{{varname}}.open(results_dir + "{{get_array_filename(var)}}", ios::binary | ios::out); - if(outfile_{{varname}}.is_open()) - { - outfile_{{varname}}.write(reinterpret_cast({{varname}}), {{var.size}}*sizeof({{get_array_name(var)}}[0])); - outfile_{{varname}}.close(); - } else - { - std::cout << "Error writing output file for {{varname}}." << endl; - } - {% endif %} - {% endfor %} - - {% for var, varname in dynamic_array_specs | dictsort(by='value') %} - ofstream outfile_{{varname}}; - outfile_{{varname}}.open(results_dir + "{{get_array_filename(var)}}", ios::binary | ios::out); - if(outfile_{{varname}}.is_open()) - { + using namespace brian; + + {% for var, varname in array_specs | dictsort(by='value') %} + {% if not (var in dynamic_array_specs or var in dynamic_array_2d_specs) %} + ofstream outfile_{{varname}}; + outfile_{{varname}}.open(results_dir + "{{get_array_filename(var)}}", ios::binary | ios::out); + if(outfile_{{varname}}.is_open()) + { + outfile_{{varname}}.write(reinterpret_cast({{varname}}), {{var.size}}*sizeof({{get_array_name(var)}}[0])); + outfile_{{varname}}.close(); + } else + { + std::cout << "Error writing output file for {{varname}}." << endl; + } + {% endif %} + {% endfor %} + + {% for var, varname in dynamic_array_specs | dictsort(by='value') %} + ofstream outfile_{{varname}}; + outfile_{{varname}}.open(results_dir + "{{get_array_filename(var)}}", ios::binary | ios::out); + if(outfile_{{varname}}.is_open()) + { if (! {{varname}}.empty() ) { - outfile_{{varname}}.write(reinterpret_cast(&{{varname}}[0]), {{varname}}.size()*sizeof({{varname}}[0])); - outfile_{{varname}}.close(); - } - } else - { - std::cout << "Error writing output file for {{varname}}." 
<< endl; - } - {% endfor %} - - {% for var, varname in dynamic_array_2d_specs | dictsort(by='value') %} - ofstream outfile_{{varname}}; - outfile_{{varname}}.open(results_dir + "{{get_array_filename(var)}}", ios::binary | ios::out); - if(outfile_{{varname}}.is_open()) - { + outfile_{{varname}}.write(reinterpret_cast(&{{varname}}[0]), {{varname}}.size()*sizeof({{varname}}[0])); + outfile_{{varname}}.close(); + } + } else + { + std::cout << "Error writing output file for {{varname}}." << endl; + } + {% endfor %} + + {% for var, varname in dynamic_array_2d_specs | dictsort(by='value') %} + ofstream outfile_{{varname}}; + outfile_{{varname}}.open(results_dir + "{{get_array_filename(var)}}", ios::binary | ios::out); + if(outfile_{{varname}}.is_open()) + { for (int n=0; n<{{varname}}.n; n++) { if (! {{varname}}(n).empty()) @@ -296,63 +301,63 @@ void _write_arrays() } } outfile_{{varname}}.close(); - } else - { - std::cout << "Error writing output file for {{varname}}." << endl; - } - {% endfor %} + } else + { + std::cout << "Error writing output file for {{varname}}." << endl; + } + {% endfor %} {% if profiled_codeobjects is defined and profiled_codeobjects %} - // Write profiling info to disk - ofstream outfile_profiling_info; - outfile_profiling_info.open(results_dir + "profiling_info.txt", ios::out); - if(outfile_profiling_info.is_open()) - { - {% for codeobj in profiled_codeobjects | sort %} - outfile_profiling_info << "{{codeobj}}\t" << {{codeobj}}_profiling_info << std::endl; - {% endfor %} - outfile_profiling_info.close(); - } else - { - std::cout << "Error writing profiling info to file." 
<< std::endl; - } + // Write profiling info to disk + ofstream outfile_profiling_info; + outfile_profiling_info.open(results_dir + "profiling_info.txt", ios::out); + if(outfile_profiling_info.is_open()) + { + {% for codeobj in profiled_codeobjects | sort %} + outfile_profiling_info << "{{codeobj}}\t" << {{codeobj}}_profiling_info << std::endl; + {% endfor %} + outfile_profiling_info.close(); + } else + { + std::cout << "Error writing profiling info to file." << std::endl; + } {% endif %} - // Write last run info to disk - ofstream outfile_last_run_info; - outfile_last_run_info.open(results_dir + "last_run_info.txt", ios::out); - if(outfile_last_run_info.is_open()) - { - outfile_last_run_info << (Network::_last_run_time) << " " << (Network::_last_run_completed_fraction) << std::endl; - outfile_last_run_info.close(); - } else - { - std::cout << "Error writing last run info to file." << std::endl; - } + // Write last run info to disk + ofstream outfile_last_run_info; + outfile_last_run_info.open(results_dir + "last_run_info.txt", ios::out); + if(outfile_last_run_info.is_open()) + { + outfile_last_run_info << (Network::_last_run_time) << " " << (Network::_last_run_completed_fraction) << std::endl; + outfile_last_run_info.close(); + } else + { + std::cout << "Error writing last run info to file." 
<< std::endl; + } } void _dealloc_arrays() { - using namespace brian; - - {% for var, varname in array_specs | dictsort(by='value') %} - {% if varname in dynamic_array_specs.values() %} - if({{varname}}!=0) - { - delete [] {{varname}}; - {{varname}} = 0; - } - {% endif %} - {% endfor %} - - // static arrays - {% for (name, dtype_spec, N, filename) in static_array_specs | sort %} - {% if not name in dynamic_array_specs.values() %} - if({{name}}!=0) - { - delete [] {{name}}; - {{name}} = 0; - } - {% endif %} - {% endfor %} + using namespace brian; + + {% for var, varname in array_specs | dictsort(by='value') %} + {% if varname in dynamic_array_specs.values() %} + if({{varname}}!=0) + { + delete [] {{varname}}; + {{varname}} = 0; + } + {% endif %} + {% endfor %} + + // static arrays + {% for (name, dtype_spec, N, filename) in static_array_specs | sort %} + {% if not name in dynamic_array_specs.values() %} + if({{name}}!=0) + { + delete [] {{name}}; + {{name}} = 0; + } + {% endif %} + {% endfor %} } {% endmacro %} @@ -369,15 +374,67 @@ void _dealloc_arrays() #include "brianlib/dynamic_array.h" #include "brianlib/stdint_compat.h" #include "network.h" -#include "randomkit.h" +#include #include {{ openmp_pragma('include') }} namespace brian { extern std::string results_dir; + +class RandomGenerator { + private: + std::mt19937 gen; + double stored_gauss; + bool has_stored_gauss = false; + public: + RandomGenerator() { + seed(); + } + void seed() { + std::random_device rd; + gen.seed(rd()); + has_stored_gauss = false; + } + void seed(unsigned long seed) { + gen.seed(seed); + has_stored_gauss = false; + } + double rand() { + /* shifts : 67108864 = 0x4000000, 9007199254740992 = 0x20000000000000 */ + const long a = gen() >> 5; + const long b = gen() >> 6; + return (a * 67108864.0 + b) / 9007199254740992.0; + } + + double randn() { + if (has_stored_gauss) { + const double tmp = stored_gauss; + has_stored_gauss = false; + return tmp; + } + else { + double f, x1, x2, r2; + + do { 
+ x1 = 2.0*rand() - 1.0; + x2 = 2.0*rand() - 1.0; + r2 = x1*x1 + x2*x2; + } + while (r2 >= 1.0 || r2 == 0.0); + + /* Box-Muller transform */ + f = sqrt(-2.0*log(r2)/r2); + /* Keep for next call */ + stored_gauss = f*x1; + has_stored_gauss = true; + return f*x2; + } + } +}; + // In OpenMP we need one state per thread -extern std::vector< rk_state* > _mersenne_twister_states; +extern std::vector< RandomGenerator > _random_generators; //////////////// clocks /////////////////// {% for clock in clocks | sort(attribute='name') %} diff --git a/brian2/devices/cpp_standalone/templates/run.cpp b/brian2/devices/cpp_standalone/templates/run.cpp index 20a429bbf..798493dd0 100644 --- a/brian2/devices/cpp_standalone/templates/run.cpp +++ b/brian2/devices/cpp_standalone/templates/run.cpp @@ -2,7 +2,7 @@ #include #include "objects.h" #include -#include "randomkit.h" +#include {% for codeobj in code_objects | sort(attribute='name') %} #include "code_objects/{{codeobj.name}}.h" @@ -22,8 +22,6 @@ void brian_start() brian::{{clock.name}}.dt = brian::{{array_specs[clock.variables['dt']]}}; brian::{{clock.name}}.t = brian::{{array_specs[clock.variables['t']]}}; {% endfor %} - for (int i=0; i<{{openmp_pragma('get_num_threads')}}; i++) - rk_randomseed(brian::_mersenne_twister_states[i]); // Note that this seed can be potentially replaced in main.cpp } void brian_end() diff --git a/brian2/parsing/functions.py b/brian2/parsing/functions.py index 29504b47c..53daa98dd 100644 --- a/brian2/parsing/functions.py +++ b/brian2/parsing/functions.py @@ -146,8 +146,6 @@ def visit_Call(self, node): func=ast.Name(id=node.func.id, ctx=ast.Load()), args=args, keywords=[], - starargs=None, - kwargs=None, ) diff --git a/brian2/parsing/rendering.py b/brian2/parsing/rendering.py index b60361a99..e48267208 100644 --- a/brian2/parsing/rendering.py +++ b/brian2/parsing/rendering.py @@ -102,8 +102,7 @@ def render_Call(self, node): raise ValueError("Keyword arguments not supported") else: if node.func.id in 
self.auto_vectorise: - vectorisation_idx = ast.Name() - vectorisation_idx.id = "_vectorisation_idx" + vectorisation_idx = ast.Name("_vectorisation_idx") args = node.args + [vectorisation_idx] else: args = node.args diff --git a/brian2/random/randomkit/randomkit.c b/brian2/random/randomkit/randomkit.c deleted file mode 100644 index 169c288dc..000000000 --- a/brian2/random/randomkit/randomkit.c +++ /dev/null @@ -1,402 +0,0 @@ -/* Random kit 1.3 */ - -/* - * Copyright (c) 2003-2005, Jean-Sebastien Roy (js@jeannot.org) - * - * The rk_random and rk_seed functions algorithms and the original design of - * the Mersenne Twister RNG: - * - * Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura, - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * 3. The names of its contributors may not be used to endorse or promote - * products derived from this software without specific prior written - * permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * Original algorithm for the implementation of rk_interval function from - * Richard J. Wagner's implementation of the Mersenne Twister RNG, optimised by - * Magnus Jonsson. - * - * Constants used in the rk_double implementation by Isaku Wada. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- */ - -/* static char const rcsid[] = - "@(#) $Jeannot: randomkit.c,v 1.28 2005/07/21 22:14:09 js Exp $"; */ -#include -#include -#include -#include -#include -#include - -#ifdef _WIN32 -/* - * Windows - * XXX: we have to use this ugly defined(__GNUC__) because it is not easy to - * detect the compiler used in distutils itself - */ -#if (defined(__GNUC__) && defined(NPY_NEEDS_MINGW_TIME_WORKAROUND)) - -/* - * FIXME: ideally, we should set this to the real version of MSVCRT. We need - * something higher than 0x601 to enable _ftime64 and co - */ -#define __MSVCRT_VERSION__ 0x0700 -#include -#include - -/* - * mingw msvcr lib import wrongly export _ftime, which does not exist in the - * actual msvc runtime for version >= 8; we make it an alias to _ftime64, which - * is available in those versions of the runtime - */ -#define _FTIME(x) _ftime64((x)) -#else -#include -#include -#define _FTIME(x) _ftime((x)) -#endif - -#ifndef RK_NO_WINCRYPT -/* Windows crypto */ -#ifndef _WIN32_WINNT -#define _WIN32_WINNT 0x0400 -#endif -#include -#include -#endif - -#else -/* Unix */ -#include -#include -#include -#endif - -#include "randomkit.h" - -#ifndef RK_DEV_URANDOM -#define RK_DEV_URANDOM "/dev/urandom" -#endif - -#ifndef RK_DEV_RANDOM -#define RK_DEV_RANDOM "/dev/random" -#endif - -char *rk_strerror[RK_ERR_MAX] = -{ - "no error", - "random device unvavailable" -}; - -/* static functions */ -static unsigned long rk_hash(unsigned long key); - -void -rk_seed(unsigned long seed, rk_state *state) -{ - int pos; - seed &= 0xffffffffUL; - - /* Knuth's PRNG as used in the Mersenne Twister reference implementation */ - for (pos = 0; pos < RK_STATE_LEN; pos++) { - state->key[pos] = seed; - seed = (1812433253UL * (seed ^ (seed >> 30)) + pos + 1) & 0xffffffffUL; - } - state->pos = RK_STATE_LEN; - state->gauss = 0; - state->has_gauss = 0; - state->has_binomial = 0; -} - -/* Thomas Wang 32 bits integer hash function */ -unsigned long -rk_hash(unsigned long key) -{ - key += ~(key << 15); - 
key ^= (key >> 10); - key += (key << 3); - key ^= (key >> 6); - key += ~(key << 11); - key ^= (key >> 16); - return key; -} - -rk_error -rk_randomseed(rk_state *state) -{ -#ifndef _WIN32 - struct timeval tv; -#else - struct _timeb tv; -#endif - int i; - - if (rk_devfill(state->key, sizeof(state->key), 0) == RK_NOERR) { - /* ensures non-zero key */ - state->key[0] |= 0x80000000UL; - state->pos = RK_STATE_LEN; - state->gauss = 0; - state->has_gauss = 0; - state->has_binomial = 0; - - for (i = 0; i < 624; i++) { - state->key[i] &= 0xffffffffUL; - } - return RK_NOERR; - } - -#ifndef _WIN32 - gettimeofday(&tv, NULL); - rk_seed(rk_hash(getpid()) ^ rk_hash(tv.tv_sec) ^ rk_hash(tv.tv_usec) - ^ rk_hash(clock()), state); -#else - _FTIME(&tv); - rk_seed(rk_hash(tv.time) ^ rk_hash(tv.millitm) ^ rk_hash(clock()), state); -#endif - - return RK_ENODEV; -} - -/* Magic Mersenne Twister constants */ -#define N 624 -#define M 397 -#define MATRIX_A 0x9908b0dfUL -#define UPPER_MASK 0x80000000UL -#define LOWER_MASK 0x7fffffffUL - -/* Slightly optimised reference implementation of the Mersenne Twister */ -unsigned long -rk_random(rk_state *state) -{ - unsigned long y; - - if (state->pos == RK_STATE_LEN) { - int i; - - for (i = 0; i < N - M; i++) { - y = (state->key[i] & UPPER_MASK) | (state->key[i+1] & LOWER_MASK); - state->key[i] = state->key[i+M] ^ (y>>1) ^ (-(y & 1) & MATRIX_A); - } - for (; i < N - 1; i++) { - y = (state->key[i] & UPPER_MASK) | (state->key[i+1] & LOWER_MASK); - state->key[i] = state->key[i+(M-N)] ^ (y>>1) ^ (-(y & 1) & MATRIX_A); - } - y = (state->key[N - 1] & UPPER_MASK) | (state->key[0] & LOWER_MASK); - state->key[N - 1] = state->key[M - 1] ^ (y >> 1) ^ (-(y & 1) & MATRIX_A); - - state->pos = 0; - } - y = state->key[state->pos++]; - - /* Tempering */ - y ^= (y >> 11); - y ^= (y << 7) & 0x9d2c5680UL; - y ^= (y << 15) & 0xefc60000UL; - y ^= (y >> 18); - - return y; -} - -long -rk_long(rk_state *state) -{ - return rk_ulong(state) >> 1; -} - -unsigned long 
-rk_ulong(rk_state *state) -{ -#if ULONG_MAX <= 0xffffffffUL - return rk_random(state); -#else - return (rk_random(state) << 32) | (rk_random(state)); -#endif -} - -unsigned long -rk_interval(unsigned long max, rk_state *state) -{ - unsigned long mask = max, value; - - if (max == 0) { - return 0; - } - /* Smallest bit mask >= max */ - mask |= mask >> 1; - mask |= mask >> 2; - mask |= mask >> 4; - mask |= mask >> 8; - mask |= mask >> 16; -#if ULONG_MAX > 0xffffffffUL - mask |= mask >> 32; -#endif - - /* Search a random value in [0..mask] <= max */ -#if ULONG_MAX > 0xffffffffUL - if (max <= 0xffffffffUL) { - while ((value = (rk_random(state) & mask)) > max); - } - else { - while ((value = (rk_ulong(state) & mask)) > max); - } -#else - while ((value = (rk_ulong(state) & mask)) > max); -#endif - return value; -} - -double -rk_double(rk_state *state) -{ - /* shifts : 67108864 = 0x4000000, 9007199254740992 = 0x20000000000000 */ - long a = rk_random(state) >> 5, b = rk_random(state) >> 6; - return (a * 67108864.0 + b) / 9007199254740992.0; -} - -void -rk_fill(void *buffer, size_t size, rk_state *state) -{ - unsigned long r; - unsigned char *buf = (unsigned char *)buffer; - - for (; size >= 4; size -= 4) { - r = rk_random(state); - *(buf++) = r & 0xFF; - *(buf++) = (r >> 8) & 0xFF; - *(buf++) = (r >> 16) & 0xFF; - *(buf++) = (r >> 24) & 0xFF; - } - - if (!size) { - return; - } - r = rk_random(state); - for (; size; r >>= 8, size --) { - *(buf++) = (unsigned char)(r & 0xFF); - } -} - -rk_error -rk_devfill(void *buffer, size_t size, int strong) -{ -#ifndef _WIN32 - FILE *rfile; - int done; - - if (strong) { - rfile = fopen(RK_DEV_RANDOM, "rb"); - } - else { - rfile = fopen(RK_DEV_URANDOM, "rb"); - } - if (rfile == NULL) { - return RK_ENODEV; - } - done = fread(buffer, size, 1, rfile); - fclose(rfile); - if (done) { - return RK_NOERR; - } -#else - -#ifndef RK_NO_WINCRYPT - HCRYPTPROV hCryptProv; - BOOL done; - - if (!CryptAcquireContext(&hCryptProv, NULL, NULL, PROV_RSA_FULL, 
- CRYPT_VERIFYCONTEXT) || !hCryptProv) { - return RK_ENODEV; - } - done = CryptGenRandom(hCryptProv, size, (unsigned char *)buffer); - CryptReleaseContext(hCryptProv, 0); - if (done) { - return RK_NOERR; - } -#endif - -#endif - return RK_ENODEV; -} - -rk_error -rk_altfill(void *buffer, size_t size, int strong, rk_state *state) -{ - rk_error err; - - err = rk_devfill(buffer, size, strong); - if (err) { - rk_fill(buffer, size, state); - } - return err; -} - -double -rk_gauss(rk_state *state) -{ - if (state->has_gauss) { - const double tmp = state->gauss; - state->gauss = 0; - state->has_gauss = 0; - return tmp; - } - else { - double f, x1, x2, r2; - - do { - x1 = 2.0*rk_double(state) - 1.0; - x2 = 2.0*rk_double(state) - 1.0; - r2 = x1*x1 + x2*x2; - } - while (r2 >= 1.0 || r2 == 0.0); - - /* Box-Muller transform */ - f = sqrt(-2.0*log(r2)/r2); - /* Keep for next call */ - state->gauss = f*x1; - state->has_gauss = 1; - return f*x2; - } -} diff --git a/brian2/random/randomkit/randomkit.h b/brian2/random/randomkit/randomkit.h deleted file mode 100644 index e049488ee..000000000 --- a/brian2/random/randomkit/randomkit.h +++ /dev/null @@ -1,189 +0,0 @@ -/* Random kit 1.3 */ - -/* - * Copyright (c) 2003-2005, Jean-Sebastien Roy (js@jeannot.org) - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* @(#) $Jeannot: randomkit.h,v 1.24 2005/07/21 22:14:09 js Exp $ */ - -/* - * Typical use: - * - * { - * rk_state state; - * unsigned long seed = 1, random_value; - * - * rk_seed(seed, &state); // Initialize the RNG - * ... - * random_value = rk_random(&state); // Generate random values in [0..RK_MAX] - * } - * - * Instead of rk_seed, you can use rk_randomseed which will get a random seed - * from /dev/urandom (or the clock, if /dev/urandom is unavailable): - * - * { - * rk_state state; - * unsigned long random_value; - * - * rk_randomseed(&state); // Initialize the RNG with a random seed - * ... - * random_value = rk_random(&state); // Generate random values in [0..RK_MAX] - * } - */ - -/* - * Useful macro: - * RK_DEV_RANDOM: the device used for random seeding. - * defaults to "/dev/urandom" - */ - -#include - -#ifndef _RANDOMKIT_ -#define _RANDOMKIT_ - -#define RK_STATE_LEN 624 - -typedef struct rk_state_ -{ - unsigned long key[RK_STATE_LEN]; - int pos; - int has_gauss; /* !=0: gauss contains a gaussian deviate */ - double gauss; - - /* The rk_state structure has been extended to store the following - * information for the binomial generator. If the input values of n or p - * are different than nsave and psave, then the other parameters will be - * recomputed. 
RTK 2005-09-02 */ - - int has_binomial; /* !=0: following parameters initialized for - binomial */ - double psave; - long nsave; - double r; - double q; - double fm; - long m; - double p1; - double xm; - double xl; - double xr; - double c; - double laml; - double lamr; - double p2; - double p3; - double p4; - -} -rk_state; - -typedef enum { - RK_NOERR = 0, /* no error */ - RK_ENODEV = 1, /* no RK_DEV_RANDOM device */ - RK_ERR_MAX = 2 -} rk_error; - -/* error strings */ -extern char *rk_strerror[RK_ERR_MAX]; - -/* Maximum generated random value */ -#define RK_MAX 0xFFFFFFFFUL - -#ifdef __cplusplus -extern "C" { -#endif - -/* - * Initialize the RNG state using the given seed. - */ -extern void rk_seed(unsigned long seed, rk_state *state); - -/* - * Initialize the RNG state using a random seed. - * Uses /dev/random or, when unavailable, the clock (see randomkit.c). - * Returns RK_NOERR when no errors occurs. - * Returns RK_ENODEV when the use of RK_DEV_RANDOM failed (for example because - * there is no such device). In this case, the RNG was initialized using the - * clock. - */ -extern rk_error rk_randomseed(rk_state *state); - -/* - * Returns a random unsigned long between 0 and RK_MAX inclusive - */ -extern unsigned long rk_random(rk_state *state); - -/* - * Returns a random long between 0 and LONG_MAX inclusive - */ -extern long rk_long(rk_state *state); - -/* - * Returns a random unsigned long between 0 and ULONG_MAX inclusive - */ -extern unsigned long rk_ulong(rk_state *state); - -/* - * Returns a random unsigned long between 0 and max inclusive. - */ -extern unsigned long rk_interval(unsigned long max, rk_state *state); - -/* - * Returns a random double between 0.0 and 1.0, 1.0 excluded. 
- */ -extern double rk_double(rk_state *state); - -/* - * fill the buffer with size random bytes - */ -extern void rk_fill(void *buffer, size_t size, rk_state *state); - -/* - * fill the buffer with randombytes from the random device - * Returns RK_ENODEV if the device is unavailable, or RK_NOERR if it is - * On Unix, if strong is defined, RK_DEV_RANDOM is used. If not, RK_DEV_URANDOM - * is used instead. This parameter has no effect on Windows. - * Warning: on most unixes RK_DEV_RANDOM will wait for enough entropy to answer - * which can take a very long time on quiet systems. - */ -extern rk_error rk_devfill(void *buffer, size_t size, int strong); - -/* - * fill the buffer using rk_devfill if the random device is available and using - * rk_fill if is is not - * parameters have the same meaning as rk_fill and rk_devfill - * Returns RK_ENODEV if the device is unavailable, or RK_NOERR if it is - */ -extern rk_error rk_altfill(void *buffer, size_t size, int strong, - rk_state *state); - -/* - * return a random gaussian deviate with variance unity and zero mean. 
- */ -extern double rk_gauss(rk_state *state); - -#ifdef __cplusplus -} -#endif - -#endif /* _RANDOMKIT_ */ diff --git a/brian2/tests/test_codegen.py b/brian2/tests/test_codegen.py index 1a3f1def8..fdc03cdf2 100644 --- a/brian2/tests/test_codegen.py +++ b/brian2/tests/test_codegen.py @@ -450,7 +450,7 @@ def test_automatic_augmented_assignments(): ("x = x - 3.0", "x += -3.0"), ("x = x/2.0", "x *= 0.5"), ("x = y + (x + 1.0)", "x += y + 1.0"), - ("x = x + x", "x *= 2.0"), + ("x = x + x", "x *= 2"), ("x = x + y + z", "x += y + z"), ("x = x + y + z", "x += y + z"), # examples that should not be rewritten diff --git a/brian2/tests/test_cpp_standalone.py b/brian2/tests/test_cpp_standalone.py index 7a23c3c26..90bf3e262 100644 --- a/brian2/tests/test_cpp_standalone.py +++ b/brian2/tests/test_cpp_standalone.py @@ -567,23 +567,34 @@ def test_profile_via_set_device_arg(): @pytest.mark.cpp_standalone @pytest.mark.standalone_only def test_delete_code_data(): - set_device("cpp_standalone", build_on_run=True, directory=None) + set_device("cpp_standalone", build_on_run=False) group = NeuronGroup(10, "dv/dt = -v / (10*ms) : volt", method="exact") group.v = np.arange(10) * mV # uses the static array mechanism run(defaultclock.dt) + # Overwrite the initial values via run_args mechanims + device.build(run_args={group.v: np.arange(10)[::-1] * mV}, directory=None) results_dir = os.path.join(device.project_dir, "results") assert os.path.exists(results_dir) and os.path.isdir(results_dir) # There should be 3 files for the clock, 2 for the neurongroup (index + v), # and the "last_run_info.txt" file assert len(os.listdir(results_dir)) == 6 - device.delete(data=True, code=False, directory=False) + device.delete(data=True, run_args=False, code=False, directory=False) assert os.path.exists(results_dir) and os.path.isdir(results_dir) assert len(os.listdir(results_dir)) == 0 - assert len(os.listdir(os.path.join(device.project_dir, "static_arrays"))) > 0 + static_arrays_before = len( + 
os.listdir(os.path.join(device.project_dir, "static_arrays")) + ) + assert static_arrays_before > 0 assert len(os.listdir(os.path.join(device.project_dir, "code_objects"))) > 0 - device.delete(data=False, code=True, directory=False) - assert len(os.listdir(os.path.join(device.project_dir, "static_arrays"))) == 0 + device.delete(data=False, code=True, run_args=False, directory=False) + assert ( + 0 + < len(os.listdir(os.path.join(device.project_dir, "static_arrays"))) + < static_arrays_before + ) assert len(os.listdir(os.path.join(device.project_dir, "code_objects"))) == 0 + device.delete(data=False, code=False, run_args=True, directory=False) + assert len(os.listdir(os.path.join(device.project_dir, "static_arrays"))) == 0 @@ -599,11 +610,29 @@ def test_delete_directory(): assert os.path.isfile(dummy_file) with catch_logs() as logs: device.delete(directory=True) - assert len(logs) == 1 + assert ( + len( + [ + l + for l in logs + if l[1] == "brian2.devices.cpp_standalone.device.delete_skips_directory" + ] + ) + == 1 + ) assert os.path.isfile(dummy_file) with catch_logs() as logs: device.delete(directory=True, force=True) - assert len(logs) == 0 + assert ( + len( + [ + l + for l in logs + if l[1] == "brian2.devices.cpp_standalone.device.delete_skips_directory" + ] + ) + == 0 + ) # everything should be deleted assert not os.path.exists(device.project_dir) diff --git a/brian2/tests/test_neurongroup.py b/brian2/tests/test_neurongroup.py index 53d442a15..28b094f11 100644 --- a/brian2/tests/test_neurongroup.py +++ b/brian2/tests/test_neurongroup.py @@ -11,7 +11,7 @@ from brian2.core.network import Network from brian2.core.preferences import prefs from brian2.core.variables import linked_var -from brian2.devices.device import device, seed +from brian2.devices.device import device, get_device, seed from brian2.equations.equations import Equations from brian2.groups.group import get_dtype from brian2.groups.neurongroup import NeuronGroup @@ -19,7 +19,11
@@ from brian2.synapses.synapses import Synapses from brian2.tests.utils import assert_allclose, exc_isinstance from brian2.units.allunits import second, volt -from brian2.units.fundamentalunits import DimensionMismatchError, have_same_dimensions +from brian2.units.fundamentalunits import ( + DIMENSIONLESS, + DimensionMismatchError, + have_same_dimensions, +) from brian2.units.stdunits import Hz, ms, mV from brian2.units.unitsafefunctions import linspace from brian2.utils.logger import catch_logs @@ -147,6 +151,31 @@ def test_variableview_calculations(): 2**G.y # raising to a power with units +@pytest.mark.standalone_compatible +def test_variableview_properties(): + G = NeuronGroup( + 10, + """ + x : 1 + y : volt + idx : integer + """, + ) + # The below properties should not require access to the values + G.x = "rand()" + G.y = "rand()*mV" + G.idx = "int(rand()*10)" + + assert have_same_dimensions(G.x.unit, DIMENSIONLESS) + assert have_same_dimensions(G.y.unit, volt) + assert have_same_dimensions(G.idx.unit, DIMENSIONLESS) + + assert G.x.shape == G.y.shape == G.idx.shape == (10,) + assert G.x.ndim == G.y.ndim == G.idx.ndim == 1 + assert G.x.dtype == G.y.dtype == prefs.core.default_float_dtype + assert G.idx.dtype == np.int32 + + @pytest.mark.codegen_independent def test_variableview_inplace_calculations(): # Check that you can directly do in-place calculation with "variable views" @@ -1957,6 +1986,60 @@ def test_random_values_fixed_seed(): assert_allclose(G.v1[:], G.v2[:]) +_random_values = { + ("RuntimeDevice", "numpy", None): ( + [0.1636023, 0.76229608, 0.74945305, 0.82121212, 0.82669968], + [-0.7758696, 0.13295831, 0.87360834, -1.21879122, 0.62980314], + ), + ("RuntimeDevice", "cython", None): ( + [0.1636023, 0.76229608, 0.74945305, 0.82121212, 0.82669968], + [-0.7758696, 0.13295831, 0.87360834, -1.21879122, 0.62980314], + ), + ("CPPStandaloneDevice", "cython", 1): ( + [0.1636023, 0.76229608, 0.74945305, 0.82121212, 0.82669968], + [-0.7758696, 0.13295831, 
0.87360834, -1.21879122, 0.62980314], + ), + ("CPPStandaloneDevice", None, 4): ( + [0.1636023, 0.76229608, 0.27318909, 0.44124824, 0.69454226], + [0.36643979, -1.53883951, 0.07274151, 1.34278769, 0.63249739], + ), +} + + +def _config_tuple(): + config = [ + get_device().__class__.__name__, + prefs.codegen.target, + prefs.devices.cpp_standalone.openmp_threads, + ] + if config[0] == "RuntimeDevice": + config[2] = None + else: + config[1] = None + return tuple(config) + + +@pytest.mark.standalone_compatible +def test_random_values_fixed_seed_numbers(): + # Verify a subset of random numbers, to make sure these numbers stay the same across updates + G = NeuronGroup( + 100, + """ + v1 : 1 + v2 : 1 + """, + ) + seed(9876) + G.v1 = "rand()" + G.v2 = "randn()" + run(0 * ms) # for standalone + expected_values = _random_values.get(_config_tuple(), None) + if expected_values is None: + pytest.skip("Random values not known for this configuration") + assert_allclose(G.v1[::20], expected_values[0]) + assert_allclose(G.v2[::20], expected_values[1]) + + @pytest.mark.standalone_compatible @pytest.mark.multiple_runs def test_random_values_fixed_and_random(): diff --git a/brian2/tests/test_subgroup.py b/brian2/tests/test_subgroup.py index c1849e598..c3e99664b 100644 --- a/brian2/tests/test_subgroup.py +++ b/brian2/tests/test_subgroup.py @@ -5,6 +5,7 @@ from brian2.core.network import schedule_propagation_offset from brian2.devices.device import reinit_and_delete from brian2.tests.utils import assert_allclose +from brian2.units.fundamentalunits import DIMENSIONLESS from brian2.utils.logger import catch_logs @@ -132,6 +133,90 @@ def test_state_variables_group_as_index_problematic(): ) +@pytest.mark.standalone_compatible +def test_variableview_calculations(): + # Check that you can directly calculate with "variable views" + G = NeuronGroup( + 10, + """ + x : 1 + y : volt + idx : integer + """, + ) + G.x = np.arange(10) + G.y = np.arange(10)[::-1] * mV + G.idx = np.arange(10, dtype=int) + 
SG = G[3:8] + + assert_allclose(SG.x * SG.y, np.arange(3, 8) * np.arange(6, 1, -1) * mV) + assert_allclose(-SG.x, -np.arange(3, 8)) + assert_allclose(-SG.y, -np.arange(6, 1, -1) * mV) + + assert_allclose(3 * SG.x, 3 * np.arange(3, 8)) + assert_allclose(3 * SG.y, 3 * np.arange(6, 1, -1) * mV) + assert_allclose(SG.x * 3, 3 * np.arange(3, 8)) + assert_allclose(SG.y * 3, 3 * np.arange(6, 1, -1) * mV) + assert_allclose(SG.x / 2.0, np.arange(3, 8) / 2.0) + assert_allclose(SG.y / 2, np.arange(6, 1, -1) * mV / 2) + assert_equal(SG.idx % 2, np.arange(3, 8, dtype=int) % 2) + assert_allclose(SG.x + 2, 2 + np.arange(3, 8)) + assert_allclose(SG.y + 2 * mV, 2 * mV + np.arange(6, 1, -1) * mV) + assert_allclose(2 + SG.x, 2 + np.arange(3, 8)) + assert_allclose(2 * mV + SG.y, 2 * mV + np.arange(6, 1, -1) * mV) + assert_allclose(SG.x - 2, np.arange(3, 8) - 2) + assert_allclose(SG.y - 2 * mV, np.arange(6, 1, -1) * mV - 2 * mV) + assert_allclose(2 - SG.x, 2 - np.arange(3, 8)) + assert_allclose(2 * mV - SG.y, 2 * mV - np.arange(6, 1, -1) * mV) + assert_allclose(SG.x**2, np.arange(3, 8) ** 2) + assert_allclose(SG.y**2, (np.arange(6, 1, -1) * mV) ** 2) + assert_allclose(2**SG.x, 2 ** np.arange(3, 8)) + + # incorrect units + with pytest.raises(DimensionMismatchError): + SG.x + SG.y + with pytest.raises(DimensionMismatchError): + SG.x[:] + SG.y + with pytest.raises(DimensionMismatchError): + SG.x + SG.y[:] + with pytest.raises(DimensionMismatchError): + SG.x + 3 * mV + with pytest.raises(DimensionMismatchError): + 3 * mV + SG.x + with pytest.raises(DimensionMismatchError): + SG.y + 3 + with pytest.raises(DimensionMismatchError): + 3 + SG.y + with pytest.raises(TypeError): + 2**SG.y # raising to a power with units + + +@pytest.mark.standalone_compatible +def test_variableview_properties(): + G = NeuronGroup( + 10, + """ + x : 1 + y : volt + idx : integer + """, + ) + # The below properties should not require access to the values + G.x = "rand()" + G.y = "rand()*mV" + G.idx = "int(rand()*10)" 
+ SG = G[3:8] + + assert have_same_dimensions(SG.x.unit, DIMENSIONLESS) + assert have_same_dimensions(SG.y.unit, volt) + assert have_same_dimensions(SG.idx.unit, DIMENSIONLESS) + # See github issue #1555 + assert SG.x.shape == SG.y.shape == SG.idx.shape == (5,) + assert SG.x.ndim == SG.y.ndim == SG.idx.ndim == 1 + assert SG.x.dtype == SG.y.dtype == prefs.core.default_float_dtype + assert SG.idx.dtype == np.int32 + + @pytest.mark.standalone_compatible def test_state_monitor(): G = NeuronGroup(10, "v : volt") diff --git a/dev/continuous-integration/update_zenodo_swh.py b/dev/continuous-integration/update_zenodo_swh.py new file mode 100644 index 000000000..6756c6c51 --- /dev/null +++ b/dev/continuous-integration/update_zenodo_swh.py @@ -0,0 +1,120 @@ +""" +Update CITATION.cff and README.md with the latest Zenodo and Software Heritage records +""" +import os +import re +import subprocess + +import pyaml +import requests +import yaml + +basedir = os.path.abspath( + os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..") +) + +# Get the latest tag from git +tag = ( + subprocess.check_output(["git", "describe", "--tags", "--abbrev=0"]) + .strip() + .decode("utf-8") +) + +print("Latest tag/release:", tag) +ZENODO_API = "https://zenodo.org/api" +# DOI always linking to the latest version (escaped slash for elasticsearch syntax) +CONCEPT_DOI = r"10.5281\/zenodo.654861" + +print("Searching for Zenodo record...") +# Get the latest version via concept doi +# The Zenodo search guide says that one can directly search by "version:...", but it does not work (BAD REQUEST) +r = requests.get(ZENODO_API + "/records", params={"q": f"conceptdoi:{CONCEPT_DOI}"}) +if not r.ok: + raise RuntimeError("Request failed: ", r.reason) +data = r.json() +assert data["hits"]["total"] == 1 +latest_zenodo_id = data["hits"]["hits"][0]["id"] + +# Get all versions +r = requests.get(ZENODO_API + f"/records/{latest_zenodo_id}/versions") +if not r.ok: + raise RuntimeError("Request failed: ", 
r.reason) +data = r.json() +versions = data["hits"]["hits"] +latest_version = [v for v in versions if v["metadata"]["version"] == tag] +if latest_version: + zenodo_record = latest_version[0] + print("Found Zenodo record for version", tag) +else: + raise RuntimeError("No Zenodo record found for version " + tag) + +print("Searching for SWH record...") + +# Find Software Heritage +resp = requests.get( + "https://archive.softwareheritage.org/api/1/origin/https://github.com/brian-team/brian2/get/" +) +if not resp.ok: + raise RuntimeError("Request failed: ", resp.reason) +data = resp.json() +visits_url = data["origin_visits_url"] +resp = requests.get(visits_url) +if not resp.ok: + raise RuntimeError("Request failed: ", resp.reason) +data = resp.json() +latest_visit = sorted(data, key=lambda x: x["date"], reverse=True)[0] +snapshot_url = latest_visit["snapshot_url"] +resp = requests.get(snapshot_url) +if not resp.ok: + raise RuntimeError("Request failed: ", resp.reason) +data = resp.json() +swh_record = data["branches"].get(f"refs/tags/{tag}") + +if swh_record: + print("Found SWH record for version", tag) +else: + swh_record = None + print("No SWH record found for version", tag) + +print("Updating CITATION.cff and README.md...") +with open(os.path.join(basedir, "CITATION.cff"), "r") as f: + citation_cff = yaml.load(f, Loader=yaml.SafeLoader) +for identifier in citation_cff["identifiers"]: + if zenodo_record and identifier["description"].startswith( + "This is the archived snapshot of version" + ): + identifier["value"] = zenodo_record["metadata"]["doi"] + identifier["description"] = ( + f"This is the archived snapshot of version {tag} of Brian 2" + ) + if swh_record and identifier["type"] == "swh": + identifier["value"] = f"swh:1:rel:{swh_record['target']}" + identifier["description"] = ( + f"Software Heritage identifier for version {tag} of Brian 2" + ) + +with open(os.path.join(basedir, "CITATION.cff"), "w") as f: + pyaml.dump(citation_cff, f, sort_keys=False) + +with 
open(os.path.join(basedir, "README.md"), "r") as f: + readme = f.read() + +if zenodo_record: + # Replace the old DOI with the new one + readme = re.sub( + r"\[!\[DOI\]\(https://zenodo.org/badge/DOI/.*\.svg\)\]\(https://zenodo.org/doi/.*\)", + f"[![DOI](https://zenodo.org/badge/DOI/{zenodo_record['metadata']['doi']}.svg)](https://zenodo.org/doi/{zenodo_record['metadata']['doi']})", + readme, + ) + +if swh_record: + # Replace the old SWH badge with the new one + # [![Software Heritage (release)](https://archive.softwareheritage.org/badge/swh:1:rel:2d4c5c8c8a6d2318332889df93ab74aef53e2c61/)](https://archive.softwareheritage.org/swh:1:rel:2d4c5c8c8a6d2318332889df93ab74aef53e2c61;origin=https://github.com/brian-team/brian2;visit=swh:1:snp:a90ab7416901a9c5cf6f56d68b3455c65d322afc) + readme = re.sub( + r"\[!\[Software Heritage \(release\)\]\(https://archive.softwareheritage.org/badge/swh:1:rel:.*\)\]\(https://archive.softwareheritage.org/swh:1:rel:.*;origin=.*\)", + f"[![Software Heritage (release)](https://archive.softwareheritage.org/badge/swh:1:rel:{swh_record['target']}/)](https://archive.softwareheritage.org/swh:1:rel:{swh_record['target']};origin=https://github.com/brian-team/brian2;visit=swh:1:snp:{latest_visit['snapshot']})", + readme, + ) + +with open(os.path.join(basedir, "README.md"), "w") as f: + f.write(readme) diff --git a/dev/tools/release/release.py b/dev/tools/release/release.py index e6391f549..0d15ac515 100644 --- a/dev/tools/release/release.py +++ b/dev/tools/release/release.py @@ -2,14 +2,19 @@ import brian2 +basedir = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..", "..")) + # Ask for version number print('Current version is: ' + brian2.__version__) version = input('Enter new Brian2 version number: ').strip() +# Run script to update codemeta.json +os.system(f'python {basedir}/.codemeta/create_codemeta.py {version}') + # commit -os.system('git commit -a -v --allow-empty -m "***** Release Brian2 %s *****"' % version) 
+os.system(f'git add {basedir}/codemeta.json && git commit -m "***** Release Brian2 {version} *****"') # add tag -os.system('git tag -a -m "Release Brian2 %s" %s' % (version, version)) +os.system(f'git tag -a -m "Release Brian2 {version}" {version}') # print commands necessary for pushing print('Review the last commit: ') diff --git a/docker/Dockerfile b/docker/Dockerfile index dd8fbd9fa..43721cac0 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -36,13 +36,11 @@ RUN python -m pip install --no-cache-dir --only-binary=:all: \ && python -m pip install /tmp/dist/Brian2*_$(uname -m).whl brian2tools \ && rm -rf /tmp/dist -# Create a non-root user -ARG USER="monty" \ - GROUP="monty" \ - PASSWORD="monty" -RUN groupadd ${GROUP} && \ - useradd -ms /bin/bash -g ${GROUP} -G sudo ${USER} && \ - echo "${USER}:${PASSWORD}" | chpasswd +# Create a non-root user (password same as username) +ARG USER="monty" +RUN groupadd ${USER} && \ + useradd -ms /bin/bash -g ${USER} -G sudo ${USER} && \ + echo "${USER}:${USER}" | chpasswd ENV HOME="/home/${USER}" RUN chown -R ${USER}:${USER} ${HOME} USER ${USER} diff --git a/examples/frompapers/Maass_Natschlaeger_Markram_2002.py b/examples/frompapers/Maass_Natschlaeger_Markram_2002.py index 7977b15a5..fedd51218 100755 --- a/examples/frompapers/Maass_Natschlaeger_Markram_2002.py +++ b/examples/frompapers/Maass_Natschlaeger_Markram_2002.py @@ -244,7 +244,7 @@ def get_synapses(name, source, target, C, l, tau_I, A, U, D, F, delay): synapses.A[:] = np.sign(A / nA) * np.random.gamma(1, abs(A / nA), size=N_syn) * nA - synapses.U[:] = np.random.normal(U, 0.5, size=N_syn) + synapses.U[:] = np.random.normal(U, 0.5 * U, size=N_syn) # paper samples from uniform, we take the mean synapses.U[:][synapses.U < 0] = U diff --git a/examples/multiprocessing/standalone307987/main b/examples/multiprocessing/standalone307987/main deleted file mode 100755 index c1dfefcf3..000000000 Binary files a/examples/multiprocessing/standalone307987/main and /dev/null 
differ diff --git a/pyproject.toml b/pyproject.toml index aab005e37..a2b31ed7c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -31,7 +31,7 @@ classifiers = [ ] [project.optional-dependencies] -test = ['pytest', 'pytest-xdist>=1.22.3', 'pytest-cov>=2.0', 'pytest-timeout'] +test = ['pytest>=8', 'pytest-xdist>=1.22.3', 'pytest-cov>=2.0', 'pytest-timeout'] docs = ['sphinx>=7', 'ipython>=5', 'sphinx-tabs'] [project.urls]