diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 08b68fab..d2990f5e 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -1,54 +1,54 @@ -name: CI -on: - push: - branches: - - 'master' - - 'release-' - tags: '*' - pull_request: - -jobs: - test: - name: Julia ${{ matrix.version }} - ${{ matrix.os }} - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - version: - - '1.6' - - '1' - # - '~1.9.0-0' - - 'nightly' - os: - - ubuntu-latest - include: - - os: macOS-latest - version: '1' - - os: windows-latest - version: '1' - steps: - - uses: actions/checkout@v2 - - uses: julia-actions/setup-julia@v1 - with: - version: ${{ matrix.version }} - - uses: julia-actions/cache@v1 - - uses: julia-actions/julia-buildpkg@v1 - - uses: julia-actions/julia-runtest@v1 - - uses: julia-actions/julia-processcoverage@v1 - - uses: codecov/codecov-action@v1 - with: - file: lcov.info - docs: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: julia-actions/setup-julia@v1 - with: - version: '1' - - name: Generate docs - run: | - julia --project=docs --color=yes -e 'using Pkg; Pkg.instantiate(); include("docs/make.jl")' - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # For authentication with GitHub Actions token - DOCUMENTER_KEY: ${{ secrets.DOCUMENTER_KEY }} # For authentication with SSH deploy key - +name: CI +on: + push: + branches: + - 'master' + - 'release-' + tags: '*' + pull_request: + +jobs: + test: + name: Julia ${{ matrix.version }} - ${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + version: + - '1.6' + - '1' + # - '~1.9.0-0' + - 'nightly' + os: + - ubuntu-latest + include: + - os: macOS-latest + version: '1' + - os: windows-latest + version: '1' + steps: + - uses: actions/checkout@v2 + - uses: julia-actions/setup-julia@v1 + with: + version: ${{ matrix.version }} + - uses: julia-actions/cache@v1 + - uses: julia-actions/julia-buildpkg@v1 + - uses: 
julia-actions/julia-runtest@v1 + - uses: julia-actions/julia-processcoverage@v1 + - uses: codecov/codecov-action@v1 + with: + file: lcov.info + docs: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: julia-actions/setup-julia@v1 + with: + version: '1' + - name: Generate docs + run: | + julia --project=docs --color=yes -e 'using Pkg; Pkg.instantiate(); include("docs/make.jl")' + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # For authentication with GitHub Actions token + DOCUMENTER_KEY: ${{ secrets.DOCUMENTER_KEY }} # For authentication with SSH deploy key + diff --git a/.github/workflows/TagBot.yml b/.github/workflows/TagBot.yml index f49313b6..083baf95 100644 --- a/.github/workflows/TagBot.yml +++ b/.github/workflows/TagBot.yml @@ -1,15 +1,15 @@ -name: TagBot -on: - issue_comment: - types: - - created - workflow_dispatch: -jobs: - TagBot: - if: github.event_name == 'workflow_dispatch' || github.actor == 'JuliaTagBot' - runs-on: ubuntu-latest - steps: - - uses: JuliaRegistries/TagBot@v1 - with: - token: ${{ secrets.GITHUB_TOKEN }} - ssh: ${{ secrets.DOCUMENTER_KEY }} +name: TagBot +on: + issue_comment: + types: + - created + workflow_dispatch: +jobs: + TagBot: + if: github.event_name == 'workflow_dispatch' || github.actor == 'JuliaTagBot' + runs-on: ubuntu-latest + steps: + - uses: JuliaRegistries/TagBot@v1 + with: + token: ${{ secrets.GITHUB_TOKEN }} + ssh: ${{ secrets.DOCUMENTER_KEY }} diff --git a/.gitignore b/.gitignore index 2d168d89..e6618ec8 100644 --- a/.gitignore +++ b/.gitignore @@ -1,9 +1,9 @@ -*.jl.cov -*.jl.*.cov -*.jl.mem -*.jld -docs/build/ -docs/site/ -benchmark/*.md -/Manifest.toml -/test/coverage/Manifest.toml +*.jl.cov +*.jl.*.cov +*.jl.mem +*.jld +docs/build/ +docs/site/ +benchmark/*.md +/Manifest.toml +/test/coverage/Manifest.toml diff --git a/CITATION.cff b/CITATION.cff index 7a9533d3..90a68063 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -1,35 +1,35 @@ -cff-version: 1.2.0 -title: Tensors.jl -message: >- - If you 
use this software, please cite this software using - the metadata from "preferred-citation". -type: software -authors: - - family-names: Carlsson - given-names: Kristoffer - orcid: 'https://orcid.org/0000-0001-9092-3092' - - family-names: Ekre - given-names: Fredrik - orcid: 'https://orcid.org/0000-0003-2476-5406' - - name: Tensors.jl contributors -identifiers: - - type: doi - value: 10.5281/zenodo.802356 - description: >- - DOI representing all versions of Tensors.jl (Zenodo - Concept DOI) -repository-code: 'https://github.com/Ferrite-FEM/Tensors.jl' -license: MIT -preferred-citation: - authors: - - family-names: Carlsson - given-names: Kristoffer - orcid: 'https://orcid.org/0000-0001-9092-3092' - - family-names: Ekre - given-names: Fredrik - orcid: 'https://orcid.org/0000-0003-2476-5406' - doi: 10.5334/jors.182 - journal: Journal of Open Research Software - title: Tensors.jl — Tensor Computations in Julia - type: article - year: 2019 +cff-version: 1.2.0 +title: Tensors.jl +message: >- + If you use this software, please cite this software using + the metadata from "preferred-citation". 
+type: software +authors: + - family-names: Carlsson + given-names: Kristoffer + orcid: 'https://orcid.org/0000-0001-9092-3092' + - family-names: Ekre + given-names: Fredrik + orcid: 'https://orcid.org/0000-0003-2476-5406' + - name: Tensors.jl contributors +identifiers: + - type: doi + value: 10.5281/zenodo.802356 + description: >- + DOI representing all versions of Tensors.jl (Zenodo + Concept DOI) +repository-code: 'https://github.com/Ferrite-FEM/Tensors.jl' +license: MIT +preferred-citation: + authors: + - family-names: Carlsson + given-names: Kristoffer + orcid: 'https://orcid.org/0000-0001-9092-3092' + - family-names: Ekre + given-names: Fredrik + orcid: 'https://orcid.org/0000-0003-2476-5406' + doi: 10.5334/jors.182 + journal: Journal of Open Research Software + title: Tensors.jl — Tensor Computations in Julia + type: article + year: 2019 diff --git a/LICENSE.md b/LICENSE.md index d04ffc16..ff8e4e53 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -1,74 +1,74 @@ -The Tensors.jl package is licensed under the MIT "Expat" License: - -> Copyright (c) 2016: Kristoffer Carlsson. -> -> Permission is hereby granted, free of charge, to any person obtaining -> a copy of this software and associated documentation files (the -> "Software"), to deal in the Software without restriction, including -> without limitation the rights to use, copy, modify, merge, publish, -> distribute, sublicense, and/or sell copies of the Software, and to -> permit persons to whom the Software is furnished to do so, subject to -> the following conditions: -> -> The above copyright notice and this permission notice shall be -> included in all copies or substantial portions of the Software. -> -> THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -> EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -> MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
-> IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -> CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -> TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -> SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -The file `eigen.jl` is derived from StaticArrays.jl, which is under the -MIT "Expat" License: - -> Copyright (c) 2016: Andy Ferris. -> -> Permission is hereby granted, free of charge, to any person obtaining -> a copy of this software and associated documentation files (the -> "Software"), to deal in the Software without restriction, including -> without limitation the rights to use, copy, modify, merge, publish, -> distribute, sublicense, and/or sell copies of the Software, and to -> permit persons to whom the Software is furnished to do so, subject to -> the following conditions: -> -> The above copyright notice and this permission notice shall be -> included in all copies or substantial portions of the Software. -> -> THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -> EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -> MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -> IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -> CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -> TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -> SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- -One of the methods in `eigen.jl` is under the Boost Software License: - -> Boost Software License - Version 1.0 - August 17th, 2003 -> -> -> -> Permission is hereby granted, free of charge, to any person or organization -> obtaining a copy of the software and accompanying documentation covered by -> this license (the "Software") to use, reproduce, display, distribute, -> execute, and transmit the Software, and to prepare derivative works of the -> Software, and to permit third-parties to whom the Software is furnished to -> do so, all subject to the following: -> -> The copyright notices in the Software and this entire statement, including -> the above license grant, this restriction and the following disclaimer, -> must be included in all copies of the Software, in whole or in part, and -> all derivative works of the Software, unless such copies or derivative -> works are solely in the form of machine-executable object code generated by -> a source language processor. -> -> THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -> IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -> FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT -> SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE -> FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, -> ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -> DEALINGS IN THE SOFTWARE. +The Tensors.jl package is licensed under the MIT "Expat" License: + +> Copyright (c) 2016: Kristoffer Carlsson. 
+> +> Permission is hereby granted, free of charge, to any person obtaining +> a copy of this software and associated documentation files (the +> "Software"), to deal in the Software without restriction, including +> without limitation the rights to use, copy, modify, merge, publish, +> distribute, sublicense, and/or sell copies of the Software, and to +> permit persons to whom the Software is furnished to do so, subject to +> the following conditions: +> +> The above copyright notice and this permission notice shall be +> included in all copies or substantial portions of the Software. +> +> THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +> EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +> MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +> IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +> CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +> TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +> SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +The file `eigen.jl` is derived from StaticArrays.jl, which is under the +MIT "Expat" License: + +> Copyright (c) 2016: Andy Ferris. +> +> Permission is hereby granted, free of charge, to any person obtaining +> a copy of this software and associated documentation files (the +> "Software"), to deal in the Software without restriction, including +> without limitation the rights to use, copy, modify, merge, publish, +> distribute, sublicense, and/or sell copies of the Software, and to +> permit persons to whom the Software is furnished to do so, subject to +> the following conditions: +> +> The above copyright notice and this permission notice shall be +> included in all copies or substantial portions of the Software. 
+> +> THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +> EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +> MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +> IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +> CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +> TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +> SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +One of the methods in `eigen.jl` is under the Boost Software License: + +> Boost Software License - Version 1.0 - August 17th, 2003 +> +> +> +> Permission is hereby granted, free of charge, to any person or organization +> obtaining a copy of the software and accompanying documentation covered by +> this license (the "Software") to use, reproduce, display, distribute, +> execute, and transmit the Software, and to prepare derivative works of the +> Software, and to permit third-parties to whom the Software is furnished to +> do so, all subject to the following: +> +> The copyright notices in the Software and this entire statement, including +> the above license grant, this restriction and the following disclaimer, +> must be included in all copies of the Software, in whole or in part, and +> all derivative works of the Software, unless such copies or derivative +> works are solely in the form of machine-executable object code generated by +> a source language processor. +> +> THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +> IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +> FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +> SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +> FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +> ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +> DEALINGS IN THE SOFTWARE. 
diff --git a/Project.toml b/Project.toml index 38f758ea..e7c759cb 100644 --- a/Project.toml +++ b/Project.toml @@ -1,29 +1,29 @@ -name = "Tensors" -uuid = "48a634ad-e948-5137-8d70-aa71f2a747f4" -version = "1.16.1" - -[deps] -ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" -LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" -SIMD = "fdea26ae-647d-5447-a871-4b548cad5224" -PrecompileTools = "aea7be01-6a6a-4083-8856-8a6e6704d82a" -StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" -Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" - -[compat] -ForwardDiff = "0.10" -SIMD = "2, 3" -PrecompileTools = "1" -StaticArrays = "1" -LinearAlgebra = "1" -Statistics = "1" -julia = "1" - -[extras] -Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" -Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" -TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f" -Unitful = "1986cc42-f94f-5a68-af5c-568840ba703d" - -[targets] -test = ["Random", "Test", "TimerOutputs", "Unitful"] +name = "Tensors" +uuid = "48a634ad-e948-5137-8d70-aa71f2a747f4" +version = "1.16.1" + +[deps] +ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" +LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" +SIMD = "fdea26ae-647d-5447-a871-4b548cad5224" +PrecompileTools = "aea7be01-6a6a-4083-8856-8a6e6704d82a" +StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" +Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" + +[compat] +ForwardDiff = "0.10" +SIMD = "2, 3" +PrecompileTools = "1" +StaticArrays = "1" +LinearAlgebra = "1" +Statistics = "1" +julia = "1" + +[extras] +Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" +Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" +TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f" +Unitful = "1986cc42-f94f-5a68-af5c-568840ba703d" + +[targets] +test = ["Random", "Test", "TimerOutputs", "Unitful"] diff --git a/README.md b/README.md index 4e10a7b1..10f388dd 100644 --- a/README.md +++ b/README.md @@ -1,82 +1,82 @@ -# Tensors.jl - -*Efficient computations with symmetric and 
non-symmetric tensors with support for automatic differentiation.* - -| **Documentation** | **Build Status** | -|:---------------------------------------:|:-----------------------------------------------------:| -| [![][docs-stable-img]][docs-stable-url] | [![][ci-img]][ci-url] [![][codecov-img]][codecov-url] | - -## Introduction - -This Julia package provides fast operations with symmetric and non-symmetric tensors of order 1, 2 and 4. -The tensors are allocated on the stack which means that there is no need to preallocate output results for performance. -Unicode infix operators are provided such that the tensor expression in the source code is similar to the one written with mathematical notation. -When possible, symmetry of tensors is exploited for better performance. -Supports Automatic Differentiation to easily compute first and second order derivatives of tensorial functions. - -## Installation - -The package can be installed with the Julia package manager. -From the Julia REPL, type `]` to enter the Pkg REPL mode and run: - -``` -pkg> add Tensors -``` - -Or, equivalently, via the `Pkg` API: - -```julia -julia> import Pkg; Pkg.add("Tensors") -``` - -## Documentation - -- [**STABLE**][docs-stable-url] — **most recently tagged version of the documentation.** - -## Project Status - -The package is tested against Julia `1.X` on Linux, macOS, and Windows. - -## Contributing and Questions - -Contributions are very welcome, as are feature requests and suggestions. Please open an [issue][issues-url] if you encounter any problems. - -### Things to work on - -If you are interested in contributing to Tensors.jl, here are a few topics that can get you started: - -* Implement support for third order tensors. These are more rarely used than first, second and fourth order tensors but are still useful in some applications. It would be good to support this. -* Find a way to reduce code duplication without sacrificing performance or compilation time. 
Currently, there is quite a lot of code duplication in the implementation of different operators. It should be possible to have a higher level code generation framework that generates optimized functions from pretty much only the Einstein summation notation for the operation. -* Tensors.jl has been developed with mostly the application to continuum mechanics in mind. For other fields, perhaps other tensor operations are useful. Implement these in a well performant manner and give good test coverage and documentation for the new functionalities. - -## Citing Tensors.jl - -If you use Tensors.jl for research and publication, please cite the following article -``` -@article{Tensors.jl, - title = {Tensors.jl -- Tensor Computations in Julia}, - author = {Carlsson, Kristoffer and Ekre, Fredrik}, - year = {2019}, - journal = {Journal of Open Research Software}, - doi = {10.5334/jors.182}, -} -``` - -## Related packages - -Both the packages below provide a convenience macro to provide einstein summation notation for standard Julia `Array`'s: - -* [Einsum.jl](https://github.com/ahwillia/Einsum.jl) -* [TensorOperations.jl](https://github.com/Jutho/TensorOperations.jl) - - -[docs-stable-img]: https://img.shields.io/badge/docs-stable-blue.svg -[docs-stable-url]: https://ferrite-fem.github.io/Tensors.jl/stable - -[ci-img]: https://github.com/Ferrite-FEM/Tensors.jl/workflows/CI/badge.svg?branch=master -[ci-url]: https://github.com/Ferrite-FEM/Tensors.jl/actions?query=workflows%3ACI+branch%3Amaster - -[issues-url]: https://github.com/Ferrite-FEM/Tensors.jl/issues - -[codecov-img]: https://codecov.io/gh/Ferrite-FEM/Tensors.jl/branch/master/graph/badge.svg?branch=master -[codecov-url]: https://codecov.io/gh/Ferrite-FEM/Tensors.jl?branch=master +# Tensors.jl + +*Efficient computations with symmetric and non-symmetric tensors with support for automatic differentiation.* + +| **Documentation** | **Build Status** | 
+|:---------------------------------------:|:-----------------------------------------------------:| +| [![][docs-stable-img]][docs-stable-url] | [![][ci-img]][ci-url] [![][codecov-img]][codecov-url] | + +## Introduction + +This Julia package provides fast operations with symmetric and non-symmetric tensors of order 1, 2 and 4. +The tensors are allocated on the stack which means that there is no need to preallocate output results for performance. +Unicode infix operators are provided such that the tensor expression in the source code is similar to the one written with mathematical notation. +When possible, symmetry of tensors is exploited for better performance. +Supports Automatic Differentiation to easily compute first and second order derivatives of tensorial functions. + +## Installation + +The package can be installed with the Julia package manager. +From the Julia REPL, type `]` to enter the Pkg REPL mode and run: + +``` +pkg> add Tensors +``` + +Or, equivalently, via the `Pkg` API: + +```julia +julia> import Pkg; Pkg.add("Tensors") +``` + +## Documentation + +- [**STABLE**][docs-stable-url] — **most recently tagged version of the documentation.** + +## Project Status + +The package is tested against Julia `1.X` on Linux, macOS, and Windows. + +## Contributing and Questions + +Contributions are very welcome, as are feature requests and suggestions. Please open an [issue][issues-url] if you encounter any problems. + +### Things to work on + +If you are interested in contributing to Tensors.jl, here are a few topics that can get you started: + +* Implement support for third order tensors. These are more rarely used than first, second and fourth order tensors but are still useful in some applications. It would be good to support this. +* Find a way to reduce code duplication without sacrificing performance or compilation time. Currently, there is quite a lot of code duplication in the implementation of different operators. 
It should be possible to have a higher level code generation framework that generates optimized functions from pretty much only the Einstein summation notation for the operation. +* Tensors.jl has been developed with mostly the application to continuum mechanics in mind. For other fields, perhaps other tensor operations are useful. Implement these in a well performant manner and give good test coverage and documentation for the new functionalities. + +## Citing Tensors.jl + +If you use Tensors.jl for research and publication, please cite the following article +``` +@article{Tensors.jl, + title = {Tensors.jl -- Tensor Computations in Julia}, + author = {Carlsson, Kristoffer and Ekre, Fredrik}, + year = {2019}, + journal = {Journal of Open Research Software}, + doi = {10.5334/jors.182}, +} +``` + +## Related packages + +Both the packages below provide a convenience macro to provide einstein summation notation for standard Julia `Array`'s: + +* [Einsum.jl](https://github.com/ahwillia/Einsum.jl) +* [TensorOperations.jl](https://github.com/Jutho/TensorOperations.jl) + + +[docs-stable-img]: https://img.shields.io/badge/docs-stable-blue.svg +[docs-stable-url]: https://ferrite-fem.github.io/Tensors.jl/stable + +[ci-img]: https://github.com/Ferrite-FEM/Tensors.jl/workflows/CI/badge.svg?branch=master +[ci-url]: https://github.com/Ferrite-FEM/Tensors.jl/actions?query=workflows%3ACI+branch%3Amaster + +[issues-url]: https://github.com/Ferrite-FEM/Tensors.jl/issues + +[codecov-img]: https://codecov.io/gh/Ferrite-FEM/Tensors.jl/branch/master/graph/badge.svg?branch=master +[codecov-url]: https://codecov.io/gh/Ferrite-FEM/Tensors.jl?branch=master diff --git a/benchmark/benchmark_ad.jl b/benchmark/benchmark_ad.jl index 7512a011..fbbdfcb7 100644 --- a/benchmark/benchmark_ad.jl +++ b/benchmark/benchmark_ad.jl @@ -1,52 +1,52 @@ -import ForwardDiff: valtype -const ∇ = Tensors.gradient -const Δ = Tensors.hessian - -function Ψ(C, μ, Kb) - T = ForwardDiff.valtype(eltype(C)) - detC = det(C) 
- J = sqrt(detC) - Ĉ = detC^(convert(T, -1 / 3)) * C - return (convert(T, μ) * (tr(Ĉ) - 3) / 2 + convert(T, Kb) * (J - 1)^2) -end - -function S(C, μ, Kb) - T = ForwardDiff.valtype(eltype(C)) - I = one(C) - J = sqrt(det(C)) - invC = inv(C) - return convert(T, μ) * det(C)^(convert(T, -1/3)) * (I - tr(C) * invC / 3) + convert(T, Kb) * (J - 1) * J * invC -end - -const μ = 1e10; -const Kb = 1.66e11; - -Ψ(C) = Ψ(C, μ, Kb) -S(C) = S(C, μ, Kb) - -SUITE["gradient"] = BenchmarkGroup(["ad"]) -SUITE["hessian"] = BenchmarkGroup(["ad"]) - -for dim in (ALL_DIMENSIONS ? (1,2,3) : (3,)) - for T in (Float64, Float32) - V2 = tensor_dict[(dim, 2, T)] - V2 = V2' ⋅ V2 - V2sym = symtensor_dict[(dim, 2, T)] - V2sym = V2sym' ⋅ V2sym - - @assert eltype(Ψ(V2)) == T - @assert eltype(S(V2)) == T - @assert eltype(∇(Ψ, V2)) == T - @assert eltype(Δ(Ψ, V2)) == T - - SUITE["gradient"]["∇Ψ(SymmetricTensor{2, $dim, $T})"] = @benchmarkable ∇(Ψ, $V2sym) - SUITE["gradient"]["∇Ψ(Tensor{2, $dim, $T})"] = @benchmarkable ∇(Ψ, $V2) - SUITE["gradient"]["∇Ψ(SymmetricTensor{2, $dim, $T}) - ana"] = @benchmarkable S($V2sym) - SUITE["gradient"]["∇Ψ(Tensor{2, $dim, $T}) - ana"] = @benchmarkable S($V2) - SUITE["gradient"]["∇S(SymmetricTensor{2, $dim, $T})"] = @benchmarkable ∇(S, $V2sym) - SUITE["gradient"]["∇S(Tensor{2, $dim, $T})"] = @benchmarkable ∇(S, $V2) - - SUITE["hessian"]["ΔΨ(SymmetricTensor{2, $dim, $T})"] = @benchmarkable Δ(Ψ, $V2sym) - SUITE["hessian"]["ΔΨ(Tensor{2, $dim, $T})"] = @benchmarkable Δ(Ψ, $V2) - end -end +import ForwardDiff: valtype +const ∇ = Tensors.gradient +const Δ = Tensors.hessian + +function Ψ(C, μ, Kb) + T = ForwardDiff.valtype(eltype(C)) + detC = det(C) + J = sqrt(detC) + Ĉ = detC^(convert(T, -1 / 3)) * C + return (convert(T, μ) * (tr(Ĉ) - 3) / 2 + convert(T, Kb) * (J - 1)^2) +end + +function S(C, μ, Kb) + T = ForwardDiff.valtype(eltype(C)) + I = one(C) + J = sqrt(det(C)) + invC = inv(C) + return convert(T, μ) * det(C)^(convert(T, -1/3)) * (I - tr(C) * invC / 3) + convert(T, Kb) * (J 
- 1) * J * invC +end + +const μ = 1e10; +const Kb = 1.66e11; + +Ψ(C) = Ψ(C, μ, Kb) +S(C) = S(C, μ, Kb) + +SUITE["gradient"] = BenchmarkGroup(["ad"]) +SUITE["hessian"] = BenchmarkGroup(["ad"]) + +for dim in (ALL_DIMENSIONS ? (1,2,3) : (3,)) + for T in (Float64, Float32) + V2 = tensor_dict[(dim, 2, T)] + V2 = V2' ⋅ V2 + V2sym = symtensor_dict[(dim, 2, T)] + V2sym = V2sym' ⋅ V2sym + + @assert eltype(Ψ(V2)) == T + @assert eltype(S(V2)) == T + @assert eltype(∇(Ψ, V2)) == T + @assert eltype(Δ(Ψ, V2)) == T + + SUITE["gradient"]["∇Ψ(SymmetricTensor{2, $dim, $T})"] = @benchmarkable ∇(Ψ, $V2sym) + SUITE["gradient"]["∇Ψ(Tensor{2, $dim, $T})"] = @benchmarkable ∇(Ψ, $V2) + SUITE["gradient"]["∇Ψ(SymmetricTensor{2, $dim, $T}) - ana"] = @benchmarkable S($V2sym) + SUITE["gradient"]["∇Ψ(Tensor{2, $dim, $T}) - ana"] = @benchmarkable S($V2) + SUITE["gradient"]["∇S(SymmetricTensor{2, $dim, $T})"] = @benchmarkable ∇(S, $V2sym) + SUITE["gradient"]["∇S(Tensor{2, $dim, $T})"] = @benchmarkable ∇(S, $V2) + + SUITE["hessian"]["ΔΨ(SymmetricTensor{2, $dim, $T})"] = @benchmarkable Δ(Ψ, $V2sym) + SUITE["hessian"]["ΔΨ(Tensor{2, $dim, $T})"] = @benchmarkable Δ(Ψ, $V2) + end +end diff --git a/benchmark/benchmark_doc.jl b/benchmark/benchmark_doc.jl index 6fc04910..0f192c38 100644 --- a/benchmark/benchmark_doc.jl +++ b/benchmark/benchmark_doc.jl @@ -1,151 +1,151 @@ -# generate benchmarks for documentation -function generate_docbenchmark() - a = tensor_dict[(3, 1, Float64)] - A = tensor_dict[(3, 2, Float64)] - As = symtensor_dict[(3, 2, Float64)] - AA = tensor_dict[(3, 4, Float64)] - AAs = symtensor_dict[(3, 4, Float64)] - - # arrays and voigt forms - b = Array(a) - B = Array(A); Bv = reshape(B, (9, )) - Bs = Array(As); Bsv = reshape(Bs, (9, )) - BB = Array(AA); BBv = reshape(BB, (9, 9)) - BBs = Array(AAs); BBsv = reshape(BBs, (9, 9)) - - open(joinpath(dirname(@__FILE__), "../docs/src/benchmarks.md"), "w") do file - function printheader(head) - println(file, "| **$(head)** | | | |") - end - function 
printrow(op) - pretty = (t) -> BenchmarkTools.prettytime(BenchmarkTools.time(minimum(t))) - speedup = (ta, tt) -> round(10*BenchmarkTools.time(minimum(ta))/BenchmarkTools.time(minimum(tt)))/10 - println(file, "| `$(op)` | $(pretty(tt)) | $(pretty(ta)) | ×$(speedup(ta, tt)) |") - end - - print(file, """ - # Benchmarks - - Here are some benchmark timings for tensors in 3 dimensions. For comparison - the timings for the same operations using standard Julia `Array`s are also - presented. - - In the table below, `a` denotes a vector, `A`, `As` denotes second order - non-symmetric and symmetric tensors and `AA`, `AAs` denotes fourth order - non-symmetric and symmetric tensors respectively. - - | Operation | `Tensor` | `Array` | speed-up | - |:-----------|---------:|--------:|---------:| - """) - - printheader("Single contraction") - tt = @benchmark $a ⋅ $a - ta = @benchmark $b ⋅ $b - printrow("a ⋅ a") - - tt = @benchmark $A ⋅ $a - ta = @benchmark $B * $b - printrow("A ⋅ a") - - tt = @benchmark $A ⋅ $A - ta = @benchmark $B * $B - printrow("A ⋅ A") - - tt = @benchmark $As ⋅ $As - ta = @benchmark $Bs * $Bs - printrow("As ⋅ As") - - printheader("Double contraction") - tt = @benchmark $A ⊡ $A - ta = @benchmark $B ⋅ $B - printrow("A ⊡ A") - - tt = @benchmark $As ⊡ $As - ta = @benchmark $Bsv ⋅ $Bsv - printrow("As ⊡ As") - - tt = @benchmark $AA ⊡ $A - ta = @benchmark $BBv * $Bv - printrow("AA ⊡ A") - - tt = @benchmark $AA ⊡ $AA - ta = @benchmark $BBv * $BBv - printrow("AA ⊡ AA") - - tt = @benchmark $AAs ⊡ $AAs - ta = @benchmark $BBsv * $BBsv - printrow("AAs ⊡ AAs") - - tt = @benchmark $As ⊡ $AAs ⊡ $As - ta = @benchmark $Bsv ⋅ ($BBsv * $Bsv) - printrow("As ⊡ AAs ⊡ As") - - printheader("Outer product") - tt = @benchmark $a ⊗ $a - ta = @benchmark $b * $(b)' - printrow("a ⊗ a") - - tt = @benchmark $A ⊗ $A - ta = @benchmark $Bv * $(Bv)' - printrow("A ⊗ A") - - tt = @benchmark $As ⊗ $As - ta = @benchmark $Bsv * $(Bsv)' - printrow("As ⊗ As") - - printheader("Other operations") - tt = 
@benchmark det($A) - ta = @benchmark det($B) - printrow("det(A)") - - tt = @benchmark det($As) - ta = @benchmark det($Bs) - printrow("det(As)") - - tt = @benchmark inv($A) - ta = @benchmark inv($B) - printrow("inv(A)") - - tt = @benchmark inv($As) - ta = @benchmark inv($Bs) - printrow("inv(As)") - - tt = @benchmark norm($a) - ta = @benchmark norm($b) - printrow("norm(a)") - - tt = @benchmark norm($A) - ta = @benchmark norm($(Bv)) - printrow("norm(A)") - - tt = @benchmark norm($As) - ta = @benchmark norm($(Bsv)) - printrow("norm(As)") - - tt = @benchmark norm($AA) - ta = @benchmark norm($(BBv[:])) - printrow("norm(AA)") - - tt = @benchmark norm($AAs) - ta = @benchmark norm($(BBsv[:])) - printrow("norm(AAs)") - - tt = @benchmark $a × $a - ta = @benchmark $b × $b - printrow("a × a") - - # print versioninfo - println(file, """ - - - The benchmarks are generated by - [`benchmark_doc.jl`](https://github.com/Ferrite-FEM/Tensors.jl/blob/master/benchmark/benchmark_doc.jl) - on the following system: - - ``` - julia> versioninfo() - """) - versioninfo(file) - println(file, "```") - end -end +# generate benchmarks for documentation +function generate_docbenchmark() + a = tensor_dict[(3, 1, Float64)] + A = tensor_dict[(3, 2, Float64)] + As = symtensor_dict[(3, 2, Float64)] + AA = tensor_dict[(3, 4, Float64)] + AAs = symtensor_dict[(3, 4, Float64)] + + # arrays and voigt forms + b = Array(a) + B = Array(A); Bv = reshape(B, (9, )) + Bs = Array(As); Bsv = reshape(Bs, (9, )) + BB = Array(AA); BBv = reshape(BB, (9, 9)) + BBs = Array(AAs); BBsv = reshape(BBs, (9, 9)) + + open(joinpath(dirname(@__FILE__), "../docs/src/benchmarks.md"), "w") do file + function printheader(head) + println(file, "| **$(head)** | | | |") + end + function printrow(op) + pretty = (t) -> BenchmarkTools.prettytime(BenchmarkTools.time(minimum(t))) + speedup = (ta, tt) -> round(10*BenchmarkTools.time(minimum(ta))/BenchmarkTools.time(minimum(tt)))/10 + println(file, "| `$(op)` | $(pretty(tt)) | $(pretty(ta)) | 
×$(speedup(ta, tt)) |") + end + + print(file, """ + # Benchmarks + + Here are some benchmark timings for tensors in 3 dimensions. For comparison + the timings for the same operations using standard Julia `Array`s are also + presented. + + In the table below, `a` denotes a vector, `A`, `As` denotes second order + non-symmetric and symmetric tensors and `AA`, `AAs` denotes fourth order + non-symmetric and symmetric tensors respectively. + + | Operation | `Tensor` | `Array` | speed-up | + |:-----------|---------:|--------:|---------:| + """) + + printheader("Single contraction") + tt = @benchmark $a ⋅ $a + ta = @benchmark $b ⋅ $b + printrow("a ⋅ a") + + tt = @benchmark $A ⋅ $a + ta = @benchmark $B * $b + printrow("A ⋅ a") + + tt = @benchmark $A ⋅ $A + ta = @benchmark $B * $B + printrow("A ⋅ A") + + tt = @benchmark $As ⋅ $As + ta = @benchmark $Bs * $Bs + printrow("As ⋅ As") + + printheader("Double contraction") + tt = @benchmark $A ⊡ $A + ta = @benchmark $B ⋅ $B + printrow("A ⊡ A") + + tt = @benchmark $As ⊡ $As + ta = @benchmark $Bsv ⋅ $Bsv + printrow("As ⊡ As") + + tt = @benchmark $AA ⊡ $A + ta = @benchmark $BBv * $Bv + printrow("AA ⊡ A") + + tt = @benchmark $AA ⊡ $AA + ta = @benchmark $BBv * $BBv + printrow("AA ⊡ AA") + + tt = @benchmark $AAs ⊡ $AAs + ta = @benchmark $BBsv * $BBsv + printrow("AAs ⊡ AAs") + + tt = @benchmark $As ⊡ $AAs ⊡ $As + ta = @benchmark $Bsv ⋅ ($BBsv * $Bsv) + printrow("As ⊡ AAs ⊡ As") + + printheader("Outer product") + tt = @benchmark $a ⊗ $a + ta = @benchmark $b * $(b)' + printrow("a ⊗ a") + + tt = @benchmark $A ⊗ $A + ta = @benchmark $Bv * $(Bv)' + printrow("A ⊗ A") + + tt = @benchmark $As ⊗ $As + ta = @benchmark $Bsv * $(Bsv)' + printrow("As ⊗ As") + + printheader("Other operations") + tt = @benchmark det($A) + ta = @benchmark det($B) + printrow("det(A)") + + tt = @benchmark det($As) + ta = @benchmark det($Bs) + printrow("det(As)") + + tt = @benchmark inv($A) + ta = @benchmark inv($B) + printrow("inv(A)") + + tt = @benchmark inv($As) + ta = 
@benchmark inv($Bs) + printrow("inv(As)") + + tt = @benchmark norm($a) + ta = @benchmark norm($b) + printrow("norm(a)") + + tt = @benchmark norm($A) + ta = @benchmark norm($(Bv)) + printrow("norm(A)") + + tt = @benchmark norm($As) + ta = @benchmark norm($(Bsv)) + printrow("norm(As)") + + tt = @benchmark norm($AA) + ta = @benchmark norm($(BBv[:])) + printrow("norm(AA)") + + tt = @benchmark norm($AAs) + ta = @benchmark norm($(BBsv[:])) + printrow("norm(AAs)") + + tt = @benchmark $a × $a + ta = @benchmark $b × $b + printrow("a × a") + + # print versioninfo + println(file, """ + + + The benchmarks are generated by + [`benchmark_doc.jl`](https://github.com/Ferrite-FEM/Tensors.jl/blob/master/benchmark/benchmark_doc.jl) + on the following system: + + ``` + julia> versioninfo() + """) + versioninfo(file) + println(file, "```") + end +end diff --git a/benchmark/benchmark_functions.jl b/benchmark/benchmark_functions.jl index 989e0183..23053dca 100644 --- a/benchmark/benchmark_functions.jl +++ b/benchmark/benchmark_functions.jl @@ -1,183 +1,183 @@ -SUITE["dot"] = BenchmarkGroup() -SUITE["dcontract"] = BenchmarkGroup() -SUITE["otimes"] = BenchmarkGroup() -SUITE["other"] = BenchmarkGroup() -SUITE["promotion"] = BenchmarkGroup() -SUITE["constructors"] = BenchmarkGroup() -SUITE["basic-operations"] = BenchmarkGroup() - - -for dim in (ALL_DIMENSIONS ? 
(1,2,3) : (3,)) - for T in (Float64, Float32, dT) - v1 = tensor_dict[(dim, 1, T)] - V2 = tensor_dict[(dim, 2, T)] - V4 = tensor_dict[(dim, 4, T)] - V2sym = symtensor_dict[(dim, 2, T)] - V4sym = symtensor_dict[(dim, 4, T)] - - # dot - SUITE["dot"]["Vec{$dim, $T} ⋅ Vec{$dim, $T}"] = @benchmarkable dot($v1, $v1) - SUITE["dot"]["Tensor{2, $dim, $T} ⋅ Vec{$dim, $T}"] = @benchmarkable dot($V2, $v1) - SUITE["dot"]["Vec{$dim, $T} ⋅ Tensor{2, $dim, $T}"] = @benchmarkable dot($v1, $V2) - SUITE["dot"]["SymmetricTensor{2, $dim, $T} ⋅ Vec{$dim, $T}"] = @benchmarkable dot($V2sym, $v1) - SUITE["dot"]["Vec{$dim, $T} ⋅ SymmetricTensor{2, $dim, $T}"] = @benchmarkable dot($v1, $V2sym) - SUITE["dot"]["Tensor{2, $dim, $T} ⋅ Tensor{2, $dim, $T}"] = @benchmarkable dot($V2, $V2) - SUITE["dot"]["SymmetricTensor{2, $dim, $T} ⋅ SymmetricTensor{2, $dim, $T}"] = @benchmarkable dot($V2sym, $V2sym) - if MIXED_SYM_NONSYM - SUITE["dot"]["SymmetricTensor{2, $dim, $T} ⋅ Tensor{2, $dim, $T}"] = @benchmarkable dot($V2sym, $V2) - SUITE["dot"]["Tensor{2, $dim, $T} ⋅ SymmetricTensor{2, $dim, $T}"] = @benchmarkable dot($V2, $V2sym) - end - - # dcontract - SUITE["dcontract"]["Tensor{2, $dim, $T} ⊡ Tensor{2, $dim, $T}"] = @benchmarkable dcontract($V2, $V2) - SUITE["dcontract"]["SymmetricTensor{2, $dim, $T} ⊡ SymmetricTensor{2, $dim, $T}"] = @benchmarkable dcontract($V2sym, $V2sym) - if MIXED_SYM_NONSYM - SUITE["dcontract"]["Tensor{2, $dim, $T} ⊡ SymmetricTensor{2, $dim, $T}"] = @benchmarkable dcontract($V2, $V2sym) - SUITE["dcontract"]["SymmetricTensor{2, $dim, $T} ⊡ Tensor{2, $dim, $T}"] = @benchmarkable dcontract($V2sym, $V2) - end - SUITE["dcontract"]["Tensor{4, $dim, $T} ⊡ Tensor{2, $dim, $T}"] = @benchmarkable dcontract($V4, $V2) - SUITE["dcontract"]["Tensor{2, $dim, $T} ⊡ Tensor{4, $dim, $T}"] = @benchmarkable dcontract($V2, $V4) - SUITE["dcontract"]["Tensor{4, $dim, $T} ⊡ Tensor{4, $dim, $T}"] = @benchmarkable dcontract($V4, $V4) - if MIXED_SYM_NONSYM - SUITE["dcontract"]["SymmetricTensor{4, $dim, 
$T} ⊡ Tensor{2, $dim, $T}"] = @benchmarkable dcontract($V4sym, $V2) - SUITE["dcontract"]["Tensor{2, $dim, $T} ⊡ SymmetricTensor{4, $dim, $T}"] = @benchmarkable dcontract($V2, $V4sym) - SUITE["dcontract"]["Tensor{4, $dim, $T} ⊡ SymmetricTensor{2, $dim, $T}"] = @benchmarkable dcontract($V4, $V2sym) - SUITE["dcontract"]["SymmetricTensor{2, $dim, $T} ⊡ Tensor{4, $dim, $T}"] = @benchmarkable dcontract($V2sym, $V4) - SUITE["dcontract"]["SymmetricTensor{4, $dim, $T} ⊡ Tensor{4, $dim, $T}"] = @benchmarkable dcontract($V4sym, $V4) - SUITE["dcontract"]["Tensor{4, $dim, $T} ⊡ SymmetricTensor{4, $dim, $T}"] = @benchmarkable dcontract($V4, $V4sym) - end - SUITE["dcontract"]["Tensor{4, $dim, $T} ⊡ Tensor{4, $dim, $T}"] = @benchmarkable dcontract($V4, $V4) - SUITE["dcontract"]["SymmetricTensor{4, $dim, $T} ⊡ SymmetricTensor{4, $dim, $T}"] = @benchmarkable dcontract($V4sym, $V4sym) - - # otimes - SUITE["otimes"]["Vec{$dim, $T} ⊗ Vec{$dim, $T}"] = @benchmarkable otimes($v1, $v1) - SUITE["otimes"]["Tensor{2, $dim, $T} ⊗ Tensor{2, $dim, $T}"] = @benchmarkable otimes($V2, $V2) - SUITE["otimes"]["SymmetricTensor{2, $dim, $T} ⊗ SymmetricTensor{2, $dim, $T}"] = @benchmarkable otimes($V2sym, $V2sym) - if MIXED_SYM_NONSYM - SUITE["otimes"]["Tensor{2, $dim, $T} ⊗ SymmetricTensor{2, $dim, $T}"] = @benchmarkable otimes($V2, $V2sym) - SUITE["otimes"]["SymmetricTensor{2, $dim, $T} ⊗ Tensor{2, $dim, $T}"] = @benchmarkable otimes($V2sym, $V2) - end - - # other - for (i, V2t) in enumerate((V2, V2sym)) - TensorType = i == 2 ? "SymmetricTensor" : "Tensor" - for f in (norm, tr, vol, det, inv, transpose, symmetric, skew, eig, mean, dev) - (i == 1 || typeof(V2t) <: Tensor || T == dT) && f == eig && continue - SUITE["other"]["$f($TensorType{2, $dim, $T})"] = @benchmarkable $f($V2t) - end - end - - for (i, V4t) in enumerate((V4, V4sym)) - TensorType = i == 2 ? 
"SymmetricTensor" : "Tensor" - for f in (norm, symmetric) - SUITE["other"]["$f($TensorType{4, $dim, $T})"] = @benchmarkable $f($V4t) - end - end - - if T in (Float32, Float64) - for f in (:+, :-) - SUITE["basic-operations"]["Vec{$dim, $T} $f Vec{$dim, $T}"] = @benchmarkable $f($v1, $v1) - SUITE["basic-operations"]["Tensor{2, $dim, $T} $f Tensor{2, $dim, $T}"] = @benchmarkable $f($V2, $V2) - SUITE["basic-operations"]["SymmetricTensor{2, $dim, $T} $f SymmetricTensor{2, $dim, $T}"] = @benchmarkable $f($V2sym, $V2sym) - SUITE["basic-operations"]["Tensor{4, $dim, $T} $f Tensor{4, $dim, $T}"] = @benchmarkable $f($V4, $V4) - SUITE["basic-operations"]["SymmetricTensor{4, $dim, $T} $f SymmetricTensor{4, $dim, $T}"] = @benchmarkable $f($V4sym, $V4sym) - if MIXED_SYM_NONSYM - SUITE["basic-operations"]["Tensor{2, $dim, $T} $f SymmetricTensor{2, $dim, $T}"] = @benchmarkable $f($V2, $V2sym) - SUITE["basic-operations"]["Tensor{4, $dim, $T} $f SymmetricTensor{4, $dim, $T}"] = @benchmarkable $f($V4, $V4sym) - end - end - for f in (:*, :/) - n = rand(T) - SUITE["basic-operations"]["Vec{$dim, $T} $f $T"] = @benchmarkable $f($v1, $n) - SUITE["basic-operations"]["Tensor{2, $dim, $T} $f $T"] = @benchmarkable $f($V2, $n) - SUITE["basic-operations"]["SymmetricTensor{2, $dim, $T} $f $T"] = @benchmarkable $f($V2sym, $n) - SUITE["basic-operations"]["Tensor{4, $dim, $T} $f $T"] = @benchmarkable $f($V4, $n) - SUITE["basic-operations"]["SymmetricTensor{4, $dim, $T} $f $T"] = @benchmarkable $f($V4sym, $n) - if f == :* - SUITE["basic-operations"]["$T $f Vec{$dim, $T}"] = @benchmarkable $f($n, $v1) - SUITE["basic-operations"]["$T $f Tensor{2, $dim, $T}"] = @benchmarkable $f($n, $V2) - SUITE["basic-operations"]["$T $f SymmetricTensor{2, $dim, $T}"] = @benchmarkable $f($n, $V2sym) - SUITE["basic-operations"]["$T $f Tensor{4, $dim, $T}"] = @benchmarkable $f($n, $V4) - SUITE["basic-operations"]["$T $f SymmetricTensor{4, $dim, $T}"] = @benchmarkable $f($n, $V4sym) - end - end - end - end -end - -for 
dim in (ALL_DIMENSIONS ? (1,2,3) : (3,)) - a32 = tensor_dict[(dim, 1, Float32)] - a64 = tensor_dict[(dim, 1, Float64)] - A32 = tensor_dict[(dim, 2, Float32)] - A64 = tensor_dict[(dim, 2, Float64)] - AA32 = tensor_dict[(dim, 4, Float32)] - AA64 = tensor_dict[(dim, 4, Float64)] - A32s = symtensor_dict[(dim, 2, Float32)] - A64s = symtensor_dict[(dim, 2, Float64)] - AA32s = symtensor_dict[(dim, 4, Float32)] - AA64s = symtensor_dict[(dim, 4, Float64)] - - # promotion between tensortypes - SUITE["promotion"]["SymmetricTensor{2, $dim} -> Tensor{2, $dim}"] = @benchmarkable promote($A64, $A64s) - SUITE["promotion"]["SymmetricTensor{4, $dim} -> Tensor{4, $dim}"] = @benchmarkable promote($AA64, $AA64s) - - # element type promotion - SUITE["promotion"]["Vec{dim, Float32} -> Vec{$dim, Float64}"] = @benchmarkable promote($a64, $a32) - SUITE["promotion"]["Tensor{2, $dim, Float32} -> Tensor{2, $dim, Float64}"] = @benchmarkable promote($A64, $A32) - SUITE["promotion"]["SymmetricTensor{2, $dim, Float32} -> SymmetricTensor{2, $dim, Float64}"] = @benchmarkable promote($A64s, $A32s) - SUITE["promotion"]["Tensor{4, $dim, Float32} -> Tensor{4, $dim, Float64}"] = @benchmarkable promote($A64, $A32) - SUITE["promotion"]["SymmetricTensor{4, $dim, Float32} -> SymmetricTensor{4, $dim, Float64}"] = @benchmarkable promote($A64s, $A32s) - - # test just some operations with mixed tensortype and eltype and hope it catches things like - # https://github.com/Ferrite-FEM/Tensors.jl/pull/5#issuecomment-282518974 - if MIXED_ELTYPES - n = 5 - SUITE["promotion"]["Tensor{2, $dim, Float64} * $Int"] = @benchmarkable *($A64, $n) - SUITE["promotion"]["Tensor{2, $dim, Float64} / $Int"] = @benchmarkable /($A64, $n) - SUITE["promotion"]["Tensor{2, $dim, Float32} + Tensor{2, $dim, Float64}"] = @benchmarkable +($A32, $A64) - SUITE["promotion"]["Tensor{2, $dim, Float32} - Tensor{2, $dim, Float64}"] = @benchmarkable -($A32, $A64) - SUITE["promotion"]["Tensor{2, $dim, Float32} ⋅ Tensor{2, $dim, Float64}"] = 
@benchmarkable dot($A32, $A64) - SUITE["promotion"]["Tensor{2, $dim, Float32} ⊡ Tensor{2, $dim, Float64}"] = @benchmarkable dcontract($A32, $A64) - SUITE["promotion"]["Tensor{2, $dim, Float32} ⊗ Tensor{2, $dim, Float64}"] = @benchmarkable otimes($A32, $A64) - end -end - -# constructors (only testing for dim = 3) -# could be done cleaner, but https://github.com/JuliaCI/BenchmarkTools.jl/issues/50 -for f in (:zero, :one, :ones, :rand) - SUITE["constructors"]["$f(Tensor{2, 3, Float32})"] = @benchmarkable $(f)(Tensor{2, 3, Float32}) - SUITE["constructors"]["$f(Tensor{4, 3, Float32})"] = @benchmarkable $(f)(Tensor{4, 3, Float32}) - SUITE["constructors"]["$f(SymmetricTensor{2, 3, Float32})"] = @benchmarkable $(f)(SymmetricTensor{2, 3, Float32}) - SUITE["constructors"]["$f(SymmetricTensor{4, 3, Float32})"] = @benchmarkable $(f)(SymmetricTensor{4, 3, Float32}) - SUITE["constructors"]["$f(Tensor{2, 3, Float64})"] = @benchmarkable $(f)(Tensor{2, 3, Float64}) - SUITE["constructors"]["$f(Tensor{4, 3, Float64})"] = @benchmarkable $(f)(Tensor{4, 3, Float64}) - SUITE["constructors"]["$f(SymmetricTensor{2, 3, Float64})"] = @benchmarkable $(f)(SymmetricTensor{2, 3, Float64}) - SUITE["constructors"]["$f(SymmetricTensor{4, 3, Float64})"] = @benchmarkable $(f)(SymmetricTensor{4, 3, Float64}) - if f != :one - SUITE["constructors"]["$f(Vec{3, Float32})"] = @benchmarkable $(f)(Vec{3, Float32}) - SUITE["constructors"]["$f(Vec{3, Float64})"] = @benchmarkable $(f)(Vec{3, Float64}) - end -end - -# create from a Julia array -for order in (1, 2, 4) - dim = 3 - A = rand(Tensors.n_components(Tensor{order, dim})) - SUITE["constructors"]["Tensor{$order, $dim}(A::Array)"] = @benchmarkable Tensor{$order, $dim}($A) - if order != 1 - As = rand(Tensors.n_components(SymmetricTensor{order, dim})) - SUITE["constructors"]["SymmetricTensor{$order, $dim}(A::Array)"] = @benchmarkable SymmetricTensor{$order, $dim}($As) - end -end - -begin - dim = 3 - f1 = (i) -> float(i) - f2 = (i, j) -> float(i) - f4 = (i, j, 
k, l) -> float(i) - SUITE["constructors"]["Vec{3}(f::Function)"] = @benchmarkable Vec{3}($f1) - SUITE["constructors"]["Tensor{2, 3}(f::Function)"] = @benchmarkable Tensor{2, 3}($f2) - SUITE["constructors"]["Tensor{4, 3}(f::Function)"] = @benchmarkable Tensor{4, 3}($f4) - SUITE["constructors"]["SymmetricTensor{2, 3}(f::Function)"] = @benchmarkable SymmetricTensor{2, 3}($f2) - SUITE["constructors"]["SymmetricTensor{4, 3}(f::Function)"] = @benchmarkable SymmetricTensor{4, 3}($f4) -end +SUITE["dot"] = BenchmarkGroup() +SUITE["dcontract"] = BenchmarkGroup() +SUITE["otimes"] = BenchmarkGroup() +SUITE["other"] = BenchmarkGroup() +SUITE["promotion"] = BenchmarkGroup() +SUITE["constructors"] = BenchmarkGroup() +SUITE["basic-operations"] = BenchmarkGroup() + + +for dim in (ALL_DIMENSIONS ? (1,2,3) : (3,)) + for T in (Float64, Float32, dT) + v1 = tensor_dict[(dim, 1, T)] + V2 = tensor_dict[(dim, 2, T)] + V4 = tensor_dict[(dim, 4, T)] + V2sym = symtensor_dict[(dim, 2, T)] + V4sym = symtensor_dict[(dim, 4, T)] + + # dot + SUITE["dot"]["Vec{$dim, $T} ⋅ Vec{$dim, $T}"] = @benchmarkable dot($v1, $v1) + SUITE["dot"]["Tensor{2, $dim, $T} ⋅ Vec{$dim, $T}"] = @benchmarkable dot($V2, $v1) + SUITE["dot"]["Vec{$dim, $T} ⋅ Tensor{2, $dim, $T}"] = @benchmarkable dot($v1, $V2) + SUITE["dot"]["SymmetricTensor{2, $dim, $T} ⋅ Vec{$dim, $T}"] = @benchmarkable dot($V2sym, $v1) + SUITE["dot"]["Vec{$dim, $T} ⋅ SymmetricTensor{2, $dim, $T}"] = @benchmarkable dot($v1, $V2sym) + SUITE["dot"]["Tensor{2, $dim, $T} ⋅ Tensor{2, $dim, $T}"] = @benchmarkable dot($V2, $V2) + SUITE["dot"]["SymmetricTensor{2, $dim, $T} ⋅ SymmetricTensor{2, $dim, $T}"] = @benchmarkable dot($V2sym, $V2sym) + if MIXED_SYM_NONSYM + SUITE["dot"]["SymmetricTensor{2, $dim, $T} ⋅ Tensor{2, $dim, $T}"] = @benchmarkable dot($V2sym, $V2) + SUITE["dot"]["Tensor{2, $dim, $T} ⋅ SymmetricTensor{2, $dim, $T}"] = @benchmarkable dot($V2, $V2sym) + end + + # dcontract + SUITE["dcontract"]["Tensor{2, $dim, $T} ⊡ Tensor{2, $dim, $T}"] = 
@benchmarkable dcontract($V2, $V2) + SUITE["dcontract"]["SymmetricTensor{2, $dim, $T} ⊡ SymmetricTensor{2, $dim, $T}"] = @benchmarkable dcontract($V2sym, $V2sym) + if MIXED_SYM_NONSYM + SUITE["dcontract"]["Tensor{2, $dim, $T} ⊡ SymmetricTensor{2, $dim, $T}"] = @benchmarkable dcontract($V2, $V2sym) + SUITE["dcontract"]["SymmetricTensor{2, $dim, $T} ⊡ Tensor{2, $dim, $T}"] = @benchmarkable dcontract($V2sym, $V2) + end + SUITE["dcontract"]["Tensor{4, $dim, $T} ⊡ Tensor{2, $dim, $T}"] = @benchmarkable dcontract($V4, $V2) + SUITE["dcontract"]["Tensor{2, $dim, $T} ⊡ Tensor{4, $dim, $T}"] = @benchmarkable dcontract($V2, $V4) + SUITE["dcontract"]["Tensor{4, $dim, $T} ⊡ Tensor{4, $dim, $T}"] = @benchmarkable dcontract($V4, $V4) + if MIXED_SYM_NONSYM + SUITE["dcontract"]["SymmetricTensor{4, $dim, $T} ⊡ Tensor{2, $dim, $T}"] = @benchmarkable dcontract($V4sym, $V2) + SUITE["dcontract"]["Tensor{2, $dim, $T} ⊡ SymmetricTensor{4, $dim, $T}"] = @benchmarkable dcontract($V2, $V4sym) + SUITE["dcontract"]["Tensor{4, $dim, $T} ⊡ SymmetricTensor{2, $dim, $T}"] = @benchmarkable dcontract($V4, $V2sym) + SUITE["dcontract"]["SymmetricTensor{2, $dim, $T} ⊡ Tensor{4, $dim, $T}"] = @benchmarkable dcontract($V2sym, $V4) + SUITE["dcontract"]["SymmetricTensor{4, $dim, $T} ⊡ Tensor{4, $dim, $T}"] = @benchmarkable dcontract($V4sym, $V4) + SUITE["dcontract"]["Tensor{4, $dim, $T} ⊡ SymmetricTensor{4, $dim, $T}"] = @benchmarkable dcontract($V4, $V4sym) + end + SUITE["dcontract"]["Tensor{4, $dim, $T} ⊡ Tensor{4, $dim, $T}"] = @benchmarkable dcontract($V4, $V4) + SUITE["dcontract"]["SymmetricTensor{4, $dim, $T} ⊡ SymmetricTensor{4, $dim, $T}"] = @benchmarkable dcontract($V4sym, $V4sym) + + # otimes + SUITE["otimes"]["Vec{$dim, $T} ⊗ Vec{$dim, $T}"] = @benchmarkable otimes($v1, $v1) + SUITE["otimes"]["Tensor{2, $dim, $T} ⊗ Tensor{2, $dim, $T}"] = @benchmarkable otimes($V2, $V2) + SUITE["otimes"]["SymmetricTensor{2, $dim, $T} ⊗ SymmetricTensor{2, $dim, $T}"] = @benchmarkable otimes($V2sym, $V2sym) + if 
MIXED_SYM_NONSYM + SUITE["otimes"]["Tensor{2, $dim, $T} ⊗ SymmetricTensor{2, $dim, $T}"] = @benchmarkable otimes($V2, $V2sym) + SUITE["otimes"]["SymmetricTensor{2, $dim, $T} ⊗ Tensor{2, $dim, $T}"] = @benchmarkable otimes($V2sym, $V2) + end + + # other + for (i, V2t) in enumerate((V2, V2sym)) + TensorType = i == 2 ? "SymmetricTensor" : "Tensor" + for f in (norm, tr, vol, det, inv, transpose, symmetric, skew, eig, mean, dev) + (i == 1 || typeof(V2t) <: Tensor || T == dT) && f == eig && continue + SUITE["other"]["$f($TensorType{2, $dim, $T})"] = @benchmarkable $f($V2t) + end + end + + for (i, V4t) in enumerate((V4, V4sym)) + TensorType = i == 2 ? "SymmetricTensor" : "Tensor" + for f in (norm, symmetric) + SUITE["other"]["$f($TensorType{4, $dim, $T})"] = @benchmarkable $f($V4t) + end + end + + if T in (Float32, Float64) + for f in (:+, :-) + SUITE["basic-operations"]["Vec{$dim, $T} $f Vec{$dim, $T}"] = @benchmarkable $f($v1, $v1) + SUITE["basic-operations"]["Tensor{2, $dim, $T} $f Tensor{2, $dim, $T}"] = @benchmarkable $f($V2, $V2) + SUITE["basic-operations"]["SymmetricTensor{2, $dim, $T} $f SymmetricTensor{2, $dim, $T}"] = @benchmarkable $f($V2sym, $V2sym) + SUITE["basic-operations"]["Tensor{4, $dim, $T} $f Tensor{4, $dim, $T}"] = @benchmarkable $f($V4, $V4) + SUITE["basic-operations"]["SymmetricTensor{4, $dim, $T} $f SymmetricTensor{4, $dim, $T}"] = @benchmarkable $f($V4sym, $V4sym) + if MIXED_SYM_NONSYM + SUITE["basic-operations"]["Tensor{2, $dim, $T} $f SymmetricTensor{2, $dim, $T}"] = @benchmarkable $f($V2, $V2sym) + SUITE["basic-operations"]["Tensor{4, $dim, $T} $f SymmetricTensor{4, $dim, $T}"] = @benchmarkable $f($V4, $V4sym) + end + end + for f in (:*, :/) + n = rand(T) + SUITE["basic-operations"]["Vec{$dim, $T} $f $T"] = @benchmarkable $f($v1, $n) + SUITE["basic-operations"]["Tensor{2, $dim, $T} $f $T"] = @benchmarkable $f($V2, $n) + SUITE["basic-operations"]["SymmetricTensor{2, $dim, $T} $f $T"] = @benchmarkable $f($V2sym, $n) + 
SUITE["basic-operations"]["Tensor{4, $dim, $T} $f $T"] = @benchmarkable $f($V4, $n) + SUITE["basic-operations"]["SymmetricTensor{4, $dim, $T} $f $T"] = @benchmarkable $f($V4sym, $n) + if f == :* + SUITE["basic-operations"]["$T $f Vec{$dim, $T}"] = @benchmarkable $f($n, $v1) + SUITE["basic-operations"]["$T $f Tensor{2, $dim, $T}"] = @benchmarkable $f($n, $V2) + SUITE["basic-operations"]["$T $f SymmetricTensor{2, $dim, $T}"] = @benchmarkable $f($n, $V2sym) + SUITE["basic-operations"]["$T $f Tensor{4, $dim, $T}"] = @benchmarkable $f($n, $V4) + SUITE["basic-operations"]["$T $f SymmetricTensor{4, $dim, $T}"] = @benchmarkable $f($n, $V4sym) + end + end + end + end +end + +for dim in (ALL_DIMENSIONS ? (1,2,3) : (3,)) + a32 = tensor_dict[(dim, 1, Float32)] + a64 = tensor_dict[(dim, 1, Float64)] + A32 = tensor_dict[(dim, 2, Float32)] + A64 = tensor_dict[(dim, 2, Float64)] + AA32 = tensor_dict[(dim, 4, Float32)] + AA64 = tensor_dict[(dim, 4, Float64)] + A32s = symtensor_dict[(dim, 2, Float32)] + A64s = symtensor_dict[(dim, 2, Float64)] + AA32s = symtensor_dict[(dim, 4, Float32)] + AA64s = symtensor_dict[(dim, 4, Float64)] + + # promotion between tensortypes + SUITE["promotion"]["SymmetricTensor{2, $dim} -> Tensor{2, $dim}"] = @benchmarkable promote($A64, $A64s) + SUITE["promotion"]["SymmetricTensor{4, $dim} -> Tensor{4, $dim}"] = @benchmarkable promote($AA64, $AA64s) + + # element type promotion + SUITE["promotion"]["Vec{dim, Float32} -> Vec{$dim, Float64}"] = @benchmarkable promote($a64, $a32) + SUITE["promotion"]["Tensor{2, $dim, Float32} -> Tensor{2, $dim, Float64}"] = @benchmarkable promote($A64, $A32) + SUITE["promotion"]["SymmetricTensor{2, $dim, Float32} -> SymmetricTensor{2, $dim, Float64}"] = @benchmarkable promote($A64s, $A32s) + SUITE["promotion"]["Tensor{4, $dim, Float32} -> Tensor{4, $dim, Float64}"] = @benchmarkable promote($A64, $A32) + SUITE["promotion"]["SymmetricTensor{4, $dim, Float32} -> SymmetricTensor{4, $dim, Float64}"] = @benchmarkable promote($A64s, 
$A32s) + + # test just some operations with mixed tensortype and eltype and hope it catches things like + # https://github.com/Ferrite-FEM/Tensors.jl/pull/5#issuecomment-282518974 + if MIXED_ELTYPES + n = 5 + SUITE["promotion"]["Tensor{2, $dim, Float64} * $Int"] = @benchmarkable *($A64, $n) + SUITE["promotion"]["Tensor{2, $dim, Float64} / $Int"] = @benchmarkable /($A64, $n) + SUITE["promotion"]["Tensor{2, $dim, Float32} + Tensor{2, $dim, Float64}"] = @benchmarkable +($A32, $A64) + SUITE["promotion"]["Tensor{2, $dim, Float32} - Tensor{2, $dim, Float64}"] = @benchmarkable -($A32, $A64) + SUITE["promotion"]["Tensor{2, $dim, Float32} ⋅ Tensor{2, $dim, Float64}"] = @benchmarkable dot($A32, $A64) + SUITE["promotion"]["Tensor{2, $dim, Float32} ⊡ Tensor{2, $dim, Float64}"] = @benchmarkable dcontract($A32, $A64) + SUITE["promotion"]["Tensor{2, $dim, Float32} ⊗ Tensor{2, $dim, Float64}"] = @benchmarkable otimes($A32, $A64) + end +end + +# constructors (only testing for dim = 3) +# could be done cleaner, but https://github.com/JuliaCI/BenchmarkTools.jl/issues/50 +for f in (:zero, :one, :ones, :rand) + SUITE["constructors"]["$f(Tensor{2, 3, Float32})"] = @benchmarkable $(f)(Tensor{2, 3, Float32}) + SUITE["constructors"]["$f(Tensor{4, 3, Float32})"] = @benchmarkable $(f)(Tensor{4, 3, Float32}) + SUITE["constructors"]["$f(SymmetricTensor{2, 3, Float32})"] = @benchmarkable $(f)(SymmetricTensor{2, 3, Float32}) + SUITE["constructors"]["$f(SymmetricTensor{4, 3, Float32})"] = @benchmarkable $(f)(SymmetricTensor{4, 3, Float32}) + SUITE["constructors"]["$f(Tensor{2, 3, Float64})"] = @benchmarkable $(f)(Tensor{2, 3, Float64}) + SUITE["constructors"]["$f(Tensor{4, 3, Float64})"] = @benchmarkable $(f)(Tensor{4, 3, Float64}) + SUITE["constructors"]["$f(SymmetricTensor{2, 3, Float64})"] = @benchmarkable $(f)(SymmetricTensor{2, 3, Float64}) + SUITE["constructors"]["$f(SymmetricTensor{4, 3, Float64})"] = @benchmarkable $(f)(SymmetricTensor{4, 3, Float64}) + if f != :one + 
SUITE["constructors"]["$f(Vec{3, Float32})"] = @benchmarkable $(f)(Vec{3, Float32}) + SUITE["constructors"]["$f(Vec{3, Float64})"] = @benchmarkable $(f)(Vec{3, Float64}) + end +end + +# create from a Julia array +for order in (1, 2, 4) + dim = 3 + A = rand(Tensors.n_components(Tensor{order, dim})) + SUITE["constructors"]["Tensor{$order, $dim}(A::Array)"] = @benchmarkable Tensor{$order, $dim}($A) + if order != 1 + As = rand(Tensors.n_components(SymmetricTensor{order, dim})) + SUITE["constructors"]["SymmetricTensor{$order, $dim}(A::Array)"] = @benchmarkable SymmetricTensor{$order, $dim}($As) + end +end + +begin + dim = 3 + f1 = (i) -> float(i) + f2 = (i, j) -> float(i) + f4 = (i, j, k, l) -> float(i) + SUITE["constructors"]["Vec{3}(f::Function)"] = @benchmarkable Vec{3}($f1) + SUITE["constructors"]["Tensor{2, 3}(f::Function)"] = @benchmarkable Tensor{2, 3}($f2) + SUITE["constructors"]["Tensor{4, 3}(f::Function)"] = @benchmarkable Tensor{4, 3}($f4) + SUITE["constructors"]["SymmetricTensor{2, 3}(f::Function)"] = @benchmarkable SymmetricTensor{2, 3}($f2) + SUITE["constructors"]["SymmetricTensor{4, 3}(f::Function)"] = @benchmarkable SymmetricTensor{4, 3}($f4) +end diff --git a/benchmark/benchmarks.jl b/benchmark/benchmarks.jl index ea46e674..44c8b336 100644 --- a/benchmark/benchmarks.jl +++ b/benchmark/benchmarks.jl @@ -1,39 +1,39 @@ -using Tensors -using BenchmarkTools -using ForwardDiff - -const SUITE = BenchmarkGroup() -const ALL_DIMENSIONS = true -const MIXED_SYM_NONSYM = true -const MIXED_ELTYPES = true - -const dT = ForwardDiff.Dual{Nothing,Float64,4} - -function create_tensors() - tensor_dict = Dict{Tuple{Int, Int, DataType}, AbstractTensor}() - symtensor_dict = Dict{Tuple{Int, Int, DataType}, AbstractTensor}() - for dim in 1:3 - for order in (1,2,4) - for T in (Float32, Float64) - tensor_dict[(dim, order, T)] = rand(Tensor{order, dim, T}) - if order != 1 - symtensor_dict[(dim, order, T)] = rand(SymmetricTensor{order, dim, T}) - else - symtensor_dict[(dim, order, 
T)] = rand(Tensor{order, dim, T}) - end - end - tensor_dict[(dim, order, dT)] = Tensor{order, dim, dT}(([ForwardDiff.Dual(rand(5)...,) for i in 1:length(rand(Tensor{order, dim}))]...,)) - if order != 1 - symtensor_dict[(dim, order, dT)] = SymmetricTensor{order, dim, dT}(([ForwardDiff.Dual(rand(5)...,) for i in 1:length(rand(SymmetricTensor{order, dim}).data)]...,)) - else - symtensor_dict[(dim, order, dT)] = Tensor{order, dim, dT}(([ForwardDiff.Dual(rand(5)...,) for i in 1:length(rand(Tensor{order, dim}).data)]...,)) - end - end - end - return tensor_dict, symtensor_dict -end - -tensor_dict, symtensor_dict = create_tensors() - -include("benchmark_functions.jl") -include("benchmark_ad.jl") +using Tensors +using BenchmarkTools +using ForwardDiff + +const SUITE = BenchmarkGroup() +const ALL_DIMENSIONS = true +const MIXED_SYM_NONSYM = true +const MIXED_ELTYPES = true + +const dT = ForwardDiff.Dual{Nothing,Float64,4} + +function create_tensors() + tensor_dict = Dict{Tuple{Int, Int, DataType}, AbstractTensor}() + symtensor_dict = Dict{Tuple{Int, Int, DataType}, AbstractTensor}() + for dim in 1:3 + for order in (1,2,4) + for T in (Float32, Float64) + tensor_dict[(dim, order, T)] = rand(Tensor{order, dim, T}) + if order != 1 + symtensor_dict[(dim, order, T)] = rand(SymmetricTensor{order, dim, T}) + else + symtensor_dict[(dim, order, T)] = rand(Tensor{order, dim, T}) + end + end + tensor_dict[(dim, order, dT)] = Tensor{order, dim, dT}(([ForwardDiff.Dual(rand(5)...,) for i in 1:length(rand(Tensor{order, dim}))]...,)) + if order != 1 + symtensor_dict[(dim, order, dT)] = SymmetricTensor{order, dim, dT}(([ForwardDiff.Dual(rand(5)...,) for i in 1:length(rand(SymmetricTensor{order, dim}).data)]...,)) + else + symtensor_dict[(dim, order, dT)] = Tensor{order, dim, dT}(([ForwardDiff.Dual(rand(5)...,) for i in 1:length(rand(Tensor{order, dim}).data)]...,)) + end + end + end + return tensor_dict, symtensor_dict +end + +tensor_dict, symtensor_dict = create_tensors() + 
+include("benchmark_functions.jl") +include("benchmark_ad.jl") diff --git a/docs/Manifest.toml b/docs/Manifest.toml index efd38464..8eae50e3 100644 --- a/docs/Manifest.toml +++ b/docs/Manifest.toml @@ -1,336 +1,336 @@ -# This file is machine-generated - editing it directly is not advised - -julia_version = "1.9.3" -manifest_format = "2.0" -project_hash = "063b18da735845e327fc97d98706d227f804d89d" - -[[deps.ANSIColoredPrinters]] -git-tree-sha1 = "574baf8110975760d391c710b6341da1afa48d8c" -uuid = "a4c015fc-c6ff-483c-b24f-f7ea428134e9" -version = "0.0.1" - -[[deps.ArgTools]] -uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f" -version = "1.1.1" - -[[deps.Artifacts]] -uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33" - -[[deps.Base64]] -uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f" - -[[deps.CommonSubexpressions]] -deps = ["MacroTools", "Test"] -git-tree-sha1 = "7b8a93dba8af7e3b42fecabf646260105ac373f7" -uuid = "bbf7d656-a473-5ed7-a52c-81e309532950" -version = "0.3.0" - -[[deps.CompilerSupportLibraries_jll]] -deps = ["Artifacts", "Libdl"] -uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae" -version = "1.0.5+0" - -[[deps.Dates]] -deps = ["Printf"] -uuid = "ade2ca70-3891-5945-98fb-dc099432e06a" - -[[deps.DiffResults]] -deps = ["StaticArraysCore"] -git-tree-sha1 = "782dd5f4561f5d267313f23853baaaa4c52ea621" -uuid = "163ba53b-c6d8-5494-b064-1a9d43ac40c5" -version = "1.1.0" - -[[deps.DiffRules]] -deps = ["IrrationalConstants", "LogExpFunctions", "NaNMath", "Random", "SpecialFunctions"] -git-tree-sha1 = "23163d55f885173722d1e4cf0f6110cdbaf7e272" -uuid = "b552c78f-8df3-52c6-915a-8e097449b14b" -version = "1.15.1" - -[[deps.DocStringExtensions]] -deps = ["LibGit2"] -git-tree-sha1 = "2fb1e02f2b635d0845df5d7c167fec4dd739b00d" -uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" -version = "0.9.3" - -[[deps.Documenter]] -deps = ["ANSIColoredPrinters", "Base64", "Dates", "DocStringExtensions", "IOCapture", "InteractiveUtils", "JSON", "LibGit2", "Logging", "Markdown", "REPL", "Test", "Unicode"] 
-git-tree-sha1 = "39fd748a73dce4c05a9655475e437170d8fb1b67" -uuid = "e30172f5-a6a5-5a46-863b-614d45cd2de4" -version = "0.27.25" - -[[deps.Downloads]] -deps = ["ArgTools", "FileWatching", "LibCURL", "NetworkOptions"] -uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6" -version = "1.6.0" - -[[deps.FileWatching]] -uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee" - -[[deps.ForwardDiff]] -deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "LinearAlgebra", "LogExpFunctions", "NaNMath", "Preferences", "Printf", "Random", "SpecialFunctions"] -git-tree-sha1 = "00e252f4d706b3d55a8863432e742bf5717b498d" -uuid = "f6369f11-7733-5829-9624-2563aa707210" -version = "0.10.35" -weakdeps = ["StaticArrays"] - - [deps.ForwardDiff.extensions] - ForwardDiffStaticArraysExt = "StaticArrays" - -[[deps.IOCapture]] -deps = ["Logging", "Random"] -git-tree-sha1 = "d75853a0bdbfb1ac815478bacd89cd27b550ace6" -uuid = "b5f81e59-6552-4d32-b1f0-c071b021bf89" -version = "0.2.3" - -[[deps.InteractiveUtils]] -deps = ["Markdown"] -uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240" - -[[deps.IrrationalConstants]] -git-tree-sha1 = "630b497eafcc20001bba38a4651b327dcfc491d2" -uuid = "92d709cd-6900-40b7-9082-c6be49f344b6" -version = "0.2.2" - -[[deps.JLLWrappers]] -deps = ["Preferences"] -git-tree-sha1 = "abc9885a7ca2052a736a600f7fa66209f96506e1" -uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210" -version = "1.4.1" - -[[deps.JSON]] -deps = ["Dates", "Mmap", "Parsers", "Unicode"] -git-tree-sha1 = "31e996f0a15c7b280ba9f76636b3ff9e2ae58c9a" -uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6" -version = "0.21.4" - -[[deps.LibCURL]] -deps = ["LibCURL_jll", "MozillaCACerts_jll"] -uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21" -version = "0.6.3" - -[[deps.LibCURL_jll]] -deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"] -uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0" -version = "7.84.0+0" - -[[deps.LibGit2]] -deps = ["Base64", "NetworkOptions", "Printf", "SHA"] -uuid = 
"76f85450-5226-5b5a-8eaa-529ad045b433" - -[[deps.LibSSH2_jll]] -deps = ["Artifacts", "Libdl", "MbedTLS_jll"] -uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8" -version = "1.10.2+0" - -[[deps.Libdl]] -uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb" - -[[deps.LinearAlgebra]] -deps = ["Libdl", "OpenBLAS_jll", "libblastrampoline_jll"] -uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" - -[[deps.LogExpFunctions]] -deps = ["DocStringExtensions", "IrrationalConstants", "LinearAlgebra"] -git-tree-sha1 = "c3ce8e7420b3a6e071e0fe4745f5d4300e37b13f" -uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688" -version = "0.3.24" - - [deps.LogExpFunctions.extensions] - LogExpFunctionsChainRulesCoreExt = "ChainRulesCore" - LogExpFunctionsChangesOfVariablesExt = "ChangesOfVariables" - LogExpFunctionsInverseFunctionsExt = "InverseFunctions" - - [deps.LogExpFunctions.weakdeps] - ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" - ChangesOfVariables = "9e997f8a-9a97-42d5-a9f1-ce6bfc15e2c0" - InverseFunctions = "3587e190-3f89-42d0-90ee-14403ec27112" - -[[deps.Logging]] -uuid = "56ddb016-857b-54e1-b83d-db4d58db5568" - -[[deps.MacroTools]] -deps = ["Markdown", "Random"] -git-tree-sha1 = "42324d08725e200c23d4dfb549e0d5d89dede2d2" -uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09" -version = "0.5.10" - -[[deps.Markdown]] -deps = ["Base64"] -uuid = "d6f4376e-aef5-505a-96c1-9c027394607a" - -[[deps.MbedTLS_jll]] -deps = ["Artifacts", "Libdl"] -uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1" -version = "2.28.2+0" - -[[deps.Mmap]] -uuid = "a63ad114-7e13-5084-954f-fe012c677804" - -[[deps.MozillaCACerts_jll]] -uuid = "14a3606d-f60d-562e-9121-12d972cd8159" -version = "2022.10.11" - -[[deps.NaNMath]] -deps = ["OpenLibm_jll"] -git-tree-sha1 = "0877504529a3e5c3343c6f8b4c0381e57e4387e4" -uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3" -version = "1.0.2" - -[[deps.NetworkOptions]] -uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908" -version = "1.2.0" - -[[deps.OpenBLAS_jll]] -deps = ["Artifacts", "CompilerSupportLibraries_jll", 
"Libdl"] -uuid = "4536629a-c528-5b80-bd46-f80d51c5b363" -version = "0.3.21+4" - -[[deps.OpenLibm_jll]] -deps = ["Artifacts", "Libdl"] -uuid = "05823500-19ac-5b8b-9628-191a04bc5112" -version = "0.8.1+0" - -[[deps.OpenSpecFun_jll]] -deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "13652491f6856acfd2db29360e1bbcd4565d04f1" -uuid = "efe28fd5-8261-553b-a9e1-b2916fc3738e" -version = "0.5.5+0" - -[[deps.Parsers]] -deps = ["Dates", "PrecompileTools", "UUIDs"] -git-tree-sha1 = "716e24b21538abc91f6205fd1d8363f39b442851" -uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0" -version = "2.7.2" - -[[deps.Pkg]] -deps = ["Artifacts", "Dates", "Downloads", "FileWatching", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"] -uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" -version = "1.9.2" - -[[deps.PrecompileTools]] -deps = ["Preferences"] -git-tree-sha1 = "9673d39decc5feece56ef3940e5dafba15ba0f81" -uuid = "aea7be01-6a6a-4083-8856-8a6e6704d82a" -version = "1.1.2" - -[[deps.Preferences]] -deps = ["TOML"] -git-tree-sha1 = "7eb1686b4f04b82f96ed7a4ea5890a4f0c7a09f1" -uuid = "21216c6a-2e73-6563-6e65-726566657250" -version = "1.4.0" - -[[deps.Printf]] -deps = ["Unicode"] -uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7" - -[[deps.REPL]] -deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"] -uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb" - -[[deps.Random]] -deps = ["SHA", "Serialization"] -uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" - -[[deps.SHA]] -uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce" -version = "0.7.0" - -[[deps.SIMD]] -deps = ["PrecompileTools"] -git-tree-sha1 = "0e270732477b9e551d884e6b07e23bb2ec947790" -uuid = "fdea26ae-647d-5447-a871-4b548cad5224" -version = "3.4.5" - -[[deps.Serialization]] -uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b" - -[[deps.Sockets]] -uuid = "6462fe0b-24de-5631-8697-dd941f90decc" - -[[deps.SparseArrays]] -deps = 
["Libdl", "LinearAlgebra", "Random", "Serialization", "SuiteSparse_jll"] -uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" - -[[deps.SpecialFunctions]] -deps = ["IrrationalConstants", "LogExpFunctions", "OpenLibm_jll", "OpenSpecFun_jll"] -git-tree-sha1 = "7beb031cf8145577fbccacd94b8a8f4ce78428d3" -uuid = "276daf66-3868-5448-9aa4-cd146d93841b" -version = "2.3.0" - - [deps.SpecialFunctions.extensions] - SpecialFunctionsChainRulesCoreExt = "ChainRulesCore" - - [deps.SpecialFunctions.weakdeps] - ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" - -[[deps.StaticArrays]] -deps = ["LinearAlgebra", "Random", "StaticArraysCore"] -git-tree-sha1 = "9cabadf6e7cd2349b6cf49f1915ad2028d65e881" -uuid = "90137ffa-7385-5640-81b9-e52037218182" -version = "1.6.2" -weakdeps = ["Statistics"] - - [deps.StaticArrays.extensions] - StaticArraysStatisticsExt = "Statistics" - -[[deps.StaticArraysCore]] -git-tree-sha1 = "36b3d696ce6366023a0ea192b4cd442268995a0d" -uuid = "1e83bf80-4336-4d27-bf5d-d5a4f845583c" -version = "1.4.2" - -[[deps.Statistics]] -deps = ["LinearAlgebra", "SparseArrays"] -uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" -version = "1.9.0" - -[[deps.SuiteSparse_jll]] -deps = ["Artifacts", "Libdl", "Pkg", "libblastrampoline_jll"] -uuid = "bea87d4a-7f5b-5778-9afe-8cc45184846c" -version = "5.10.1+6" - -[[deps.TOML]] -deps = ["Dates"] -uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76" -version = "1.0.3" - -[[deps.Tar]] -deps = ["ArgTools", "SHA"] -uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e" -version = "1.10.0" - -[[deps.Tensors]] -deps = ["ForwardDiff", "LinearAlgebra", "PrecompileTools", "SIMD", "StaticArrays", "Statistics"] -path = ".." 
-uuid = "48a634ad-e948-5137-8d70-aa71f2a747f4" -version = "1.16.1" - -[[deps.Test]] -deps = ["InteractiveUtils", "Logging", "Random", "Serialization"] -uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40" - -[[deps.UUIDs]] -deps = ["Random", "SHA"] -uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4" - -[[deps.Unicode]] -uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5" - -[[deps.Zlib_jll]] -deps = ["Libdl"] -uuid = "83775a58-1f1d-513f-b197-d71354ab007a" -version = "1.2.13+0" - -[[deps.libblastrampoline_jll]] -deps = ["Artifacts", "Libdl"] -uuid = "8e850b90-86db-534c-a0d3-1478176c7d93" -version = "5.8.0+0" - -[[deps.nghttp2_jll]] -deps = ["Artifacts", "Libdl"] -uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d" -version = "1.48.0+0" - -[[deps.p7zip_jll]] -deps = ["Artifacts", "Libdl"] -uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0" -version = "17.4.0+0" +# This file is machine-generated - editing it directly is not advised + +julia_version = "1.9.3" +manifest_format = "2.0" +project_hash = "063b18da735845e327fc97d98706d227f804d89d" + +[[deps.ANSIColoredPrinters]] +git-tree-sha1 = "574baf8110975760d391c710b6341da1afa48d8c" +uuid = "a4c015fc-c6ff-483c-b24f-f7ea428134e9" +version = "0.0.1" + +[[deps.ArgTools]] +uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f" +version = "1.1.1" + +[[deps.Artifacts]] +uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33" + +[[deps.Base64]] +uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f" + +[[deps.CommonSubexpressions]] +deps = ["MacroTools", "Test"] +git-tree-sha1 = "7b8a93dba8af7e3b42fecabf646260105ac373f7" +uuid = "bbf7d656-a473-5ed7-a52c-81e309532950" +version = "0.3.0" + +[[deps.CompilerSupportLibraries_jll]] +deps = ["Artifacts", "Libdl"] +uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae" +version = "1.0.5+0" + +[[deps.Dates]] +deps = ["Printf"] +uuid = "ade2ca70-3891-5945-98fb-dc099432e06a" + +[[deps.DiffResults]] +deps = ["StaticArraysCore"] +git-tree-sha1 = "782dd5f4561f5d267313f23853baaaa4c52ea621" +uuid = "163ba53b-c6d8-5494-b064-1a9d43ac40c5" +version = "1.1.0" + 
+[[deps.DiffRules]] +deps = ["IrrationalConstants", "LogExpFunctions", "NaNMath", "Random", "SpecialFunctions"] +git-tree-sha1 = "23163d55f885173722d1e4cf0f6110cdbaf7e272" +uuid = "b552c78f-8df3-52c6-915a-8e097449b14b" +version = "1.15.1" + +[[deps.DocStringExtensions]] +deps = ["LibGit2"] +git-tree-sha1 = "2fb1e02f2b635d0845df5d7c167fec4dd739b00d" +uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" +version = "0.9.3" + +[[deps.Documenter]] +deps = ["ANSIColoredPrinters", "Base64", "Dates", "DocStringExtensions", "IOCapture", "InteractiveUtils", "JSON", "LibGit2", "Logging", "Markdown", "REPL", "Test", "Unicode"] +git-tree-sha1 = "39fd748a73dce4c05a9655475e437170d8fb1b67" +uuid = "e30172f5-a6a5-5a46-863b-614d45cd2de4" +version = "0.27.25" + +[[deps.Downloads]] +deps = ["ArgTools", "FileWatching", "LibCURL", "NetworkOptions"] +uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6" +version = "1.6.0" + +[[deps.FileWatching]] +uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee" + +[[deps.ForwardDiff]] +deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "LinearAlgebra", "LogExpFunctions", "NaNMath", "Preferences", "Printf", "Random", "SpecialFunctions"] +git-tree-sha1 = "00e252f4d706b3d55a8863432e742bf5717b498d" +uuid = "f6369f11-7733-5829-9624-2563aa707210" +version = "0.10.35" +weakdeps = ["StaticArrays"] + + [deps.ForwardDiff.extensions] + ForwardDiffStaticArraysExt = "StaticArrays" + +[[deps.IOCapture]] +deps = ["Logging", "Random"] +git-tree-sha1 = "d75853a0bdbfb1ac815478bacd89cd27b550ace6" +uuid = "b5f81e59-6552-4d32-b1f0-c071b021bf89" +version = "0.2.3" + +[[deps.InteractiveUtils]] +deps = ["Markdown"] +uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240" + +[[deps.IrrationalConstants]] +git-tree-sha1 = "630b497eafcc20001bba38a4651b327dcfc491d2" +uuid = "92d709cd-6900-40b7-9082-c6be49f344b6" +version = "0.2.2" + +[[deps.JLLWrappers]] +deps = ["Preferences"] +git-tree-sha1 = "abc9885a7ca2052a736a600f7fa66209f96506e1" +uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210" +version = 
"1.4.1" + +[[deps.JSON]] +deps = ["Dates", "Mmap", "Parsers", "Unicode"] +git-tree-sha1 = "31e996f0a15c7b280ba9f76636b3ff9e2ae58c9a" +uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6" +version = "0.21.4" + +[[deps.LibCURL]] +deps = ["LibCURL_jll", "MozillaCACerts_jll"] +uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21" +version = "0.6.3" + +[[deps.LibCURL_jll]] +deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"] +uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0" +version = "7.84.0+0" + +[[deps.LibGit2]] +deps = ["Base64", "NetworkOptions", "Printf", "SHA"] +uuid = "76f85450-5226-5b5a-8eaa-529ad045b433" + +[[deps.LibSSH2_jll]] +deps = ["Artifacts", "Libdl", "MbedTLS_jll"] +uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8" +version = "1.10.2+0" + +[[deps.Libdl]] +uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb" + +[[deps.LinearAlgebra]] +deps = ["Libdl", "OpenBLAS_jll", "libblastrampoline_jll"] +uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" + +[[deps.LogExpFunctions]] +deps = ["DocStringExtensions", "IrrationalConstants", "LinearAlgebra"] +git-tree-sha1 = "c3ce8e7420b3a6e071e0fe4745f5d4300e37b13f" +uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688" +version = "0.3.24" + + [deps.LogExpFunctions.extensions] + LogExpFunctionsChainRulesCoreExt = "ChainRulesCore" + LogExpFunctionsChangesOfVariablesExt = "ChangesOfVariables" + LogExpFunctionsInverseFunctionsExt = "InverseFunctions" + + [deps.LogExpFunctions.weakdeps] + ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" + ChangesOfVariables = "9e997f8a-9a97-42d5-a9f1-ce6bfc15e2c0" + InverseFunctions = "3587e190-3f89-42d0-90ee-14403ec27112" + +[[deps.Logging]] +uuid = "56ddb016-857b-54e1-b83d-db4d58db5568" + +[[deps.MacroTools]] +deps = ["Markdown", "Random"] +git-tree-sha1 = "42324d08725e200c23d4dfb549e0d5d89dede2d2" +uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09" +version = "0.5.10" + +[[deps.Markdown]] +deps = ["Base64"] +uuid = "d6f4376e-aef5-505a-96c1-9c027394607a" + +[[deps.MbedTLS_jll]] +deps = 
["Artifacts", "Libdl"] +uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1" +version = "2.28.2+0" + +[[deps.Mmap]] +uuid = "a63ad114-7e13-5084-954f-fe012c677804" + +[[deps.MozillaCACerts_jll]] +uuid = "14a3606d-f60d-562e-9121-12d972cd8159" +version = "2022.10.11" + +[[deps.NaNMath]] +deps = ["OpenLibm_jll"] +git-tree-sha1 = "0877504529a3e5c3343c6f8b4c0381e57e4387e4" +uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3" +version = "1.0.2" + +[[deps.NetworkOptions]] +uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908" +version = "1.2.0" + +[[deps.OpenBLAS_jll]] +deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"] +uuid = "4536629a-c528-5b80-bd46-f80d51c5b363" +version = "0.3.21+4" + +[[deps.OpenLibm_jll]] +deps = ["Artifacts", "Libdl"] +uuid = "05823500-19ac-5b8b-9628-191a04bc5112" +version = "0.8.1+0" + +[[deps.OpenSpecFun_jll]] +deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "13652491f6856acfd2db29360e1bbcd4565d04f1" +uuid = "efe28fd5-8261-553b-a9e1-b2916fc3738e" +version = "0.5.5+0" + +[[deps.Parsers]] +deps = ["Dates", "PrecompileTools", "UUIDs"] +git-tree-sha1 = "716e24b21538abc91f6205fd1d8363f39b442851" +uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0" +version = "2.7.2" + +[[deps.Pkg]] +deps = ["Artifacts", "Dates", "Downloads", "FileWatching", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"] +uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" +version = "1.9.2" + +[[deps.PrecompileTools]] +deps = ["Preferences"] +git-tree-sha1 = "9673d39decc5feece56ef3940e5dafba15ba0f81" +uuid = "aea7be01-6a6a-4083-8856-8a6e6704d82a" +version = "1.1.2" + +[[deps.Preferences]] +deps = ["TOML"] +git-tree-sha1 = "7eb1686b4f04b82f96ed7a4ea5890a4f0c7a09f1" +uuid = "21216c6a-2e73-6563-6e65-726566657250" +version = "1.4.0" + +[[deps.Printf]] +deps = ["Unicode"] +uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7" + +[[deps.REPL]] +deps = ["InteractiveUtils", "Markdown", 
"Sockets", "Unicode"] +uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb" + +[[deps.Random]] +deps = ["SHA", "Serialization"] +uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" + +[[deps.SHA]] +uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce" +version = "0.7.0" + +[[deps.SIMD]] +deps = ["PrecompileTools"] +git-tree-sha1 = "0e270732477b9e551d884e6b07e23bb2ec947790" +uuid = "fdea26ae-647d-5447-a871-4b548cad5224" +version = "3.4.5" + +[[deps.Serialization]] +uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b" + +[[deps.Sockets]] +uuid = "6462fe0b-24de-5631-8697-dd941f90decc" + +[[deps.SparseArrays]] +deps = ["Libdl", "LinearAlgebra", "Random", "Serialization", "SuiteSparse_jll"] +uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" + +[[deps.SpecialFunctions]] +deps = ["IrrationalConstants", "LogExpFunctions", "OpenLibm_jll", "OpenSpecFun_jll"] +git-tree-sha1 = "7beb031cf8145577fbccacd94b8a8f4ce78428d3" +uuid = "276daf66-3868-5448-9aa4-cd146d93841b" +version = "2.3.0" + + [deps.SpecialFunctions.extensions] + SpecialFunctionsChainRulesCoreExt = "ChainRulesCore" + + [deps.SpecialFunctions.weakdeps] + ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" + +[[deps.StaticArrays]] +deps = ["LinearAlgebra", "Random", "StaticArraysCore"] +git-tree-sha1 = "9cabadf6e7cd2349b6cf49f1915ad2028d65e881" +uuid = "90137ffa-7385-5640-81b9-e52037218182" +version = "1.6.2" +weakdeps = ["Statistics"] + + [deps.StaticArrays.extensions] + StaticArraysStatisticsExt = "Statistics" + +[[deps.StaticArraysCore]] +git-tree-sha1 = "36b3d696ce6366023a0ea192b4cd442268995a0d" +uuid = "1e83bf80-4336-4d27-bf5d-d5a4f845583c" +version = "1.4.2" + +[[deps.Statistics]] +deps = ["LinearAlgebra", "SparseArrays"] +uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" +version = "1.9.0" + +[[deps.SuiteSparse_jll]] +deps = ["Artifacts", "Libdl", "Pkg", "libblastrampoline_jll"] +uuid = "bea87d4a-7f5b-5778-9afe-8cc45184846c" +version = "5.10.1+6" + +[[deps.TOML]] +deps = ["Dates"] +uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76" +version = 
"1.0.3" + +[[deps.Tar]] +deps = ["ArgTools", "SHA"] +uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e" +version = "1.10.0" + +[[deps.Tensors]] +deps = ["ForwardDiff", "LinearAlgebra", "PrecompileTools", "SIMD", "StaticArrays", "Statistics"] +path = ".." +uuid = "48a634ad-e948-5137-8d70-aa71f2a747f4" +version = "1.16.1" + +[[deps.Test]] +deps = ["InteractiveUtils", "Logging", "Random", "Serialization"] +uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40" + +[[deps.UUIDs]] +deps = ["Random", "SHA"] +uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4" + +[[deps.Unicode]] +uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5" + +[[deps.Zlib_jll]] +deps = ["Libdl"] +uuid = "83775a58-1f1d-513f-b197-d71354ab007a" +version = "1.2.13+0" + +[[deps.libblastrampoline_jll]] +deps = ["Artifacts", "Libdl"] +uuid = "8e850b90-86db-534c-a0d3-1478176c7d93" +version = "5.8.0+0" + +[[deps.nghttp2_jll]] +deps = ["Artifacts", "Libdl"] +uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d" +version = "1.48.0+0" + +[[deps.p7zip_jll]] +deps = ["Artifacts", "Libdl"] +uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0" +version = "17.4.0+0" diff --git a/docs/Project.toml b/docs/Project.toml index cb30e2e6..754990d2 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -1,3 +1,3 @@ -[deps] -Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4" -Tensors = "48a634ad-e948-5137-8d70-aa71f2a747f4" +[deps] +Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4" +Tensors = "48a634ad-e948-5137-8d70-aa71f2a747f4" diff --git a/docs/make.jl b/docs/make.jl index b8e242a0..d6f0c9c6 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -1,33 +1,33 @@ -using Documenter, Tensors - -# Setup for doctests in docstrings -DocMeta.setdocmeta!(Tensors, :DocTestSetup, - quote - using Random - Random.seed!(1234) - using Tensors - end -) - -makedocs( - format = Documenter.HTML(prettyurls = get(ENV, "CI", nothing) == "true"), - modules = [Tensors], - sitename = "Tensors.jl", - pages = Any[ - "Home" => "index.md", - "Manual" => [ - "man/constructing_tensors.md", - 
"man/indexing.md", - "man/binary_operators.md", - "man/other_operators.md", - "man/storing_tensors.md", - "man/automatic_differentiation.md", - ], - "Benchmarks" => "benchmarks.md", - "Demos" => "demos.md" - ] -) - -deploydocs( - repo = "github.com/Ferrite-FEM/Tensors.jl.git", -) +using Documenter, Tensors + +# Setup for doctests in docstrings +DocMeta.setdocmeta!(Tensors, :DocTestSetup, + quote + using Random + Random.seed!(1234) + using Tensors + end +) + +makedocs( + format = Documenter.HTML(prettyurls = get(ENV, "CI", nothing) == "true"), + modules = [Tensors], + sitename = "Tensors.jl", + pages = Any[ + "Home" => "index.md", + "Manual" => [ + "man/constructing_tensors.md", + "man/indexing.md", + "man/binary_operators.md", + "man/other_operators.md", + "man/storing_tensors.md", + "man/automatic_differentiation.md", + ], + "Benchmarks" => "benchmarks.md", + "Demos" => "demos.md" + ] +) + +deploydocs( + repo = "github.com/Ferrite-FEM/Tensors.jl.git", +) diff --git a/docs/src/benchmarks.md b/docs/src/benchmarks.md index 677210fc..7b4c01b7 100644 --- a/docs/src/benchmarks.md +++ b/docs/src/benchmarks.md @@ -1,59 +1,59 @@ -# Benchmarks - -Here are some benchmark timings for tensors in 3 dimensions. For comparison -the timings for the same operations using standard Julia `Array`s are also -presented. - -In the table below, `a` denotes a vector, `A`, `As` denotes second order -non-symmetric and symmetric tensors and `AA`, `AAs` denotes fourth order -non-symmetric and symmetric tensors respectively. 
- -| Operation | `Tensor` | `Array` | speed-up | -|:-----------|---------:|--------:|---------:| -| **Single contraction** | | | | -| `a ⋅ a` | 1.241 ns | 9.795 ns | ×7.9 | -| `A ⋅ a` | 2.161 ns | 58.769 ns | ×27.2 | -| `A ⋅ A` | 3.117 ns | 44.395 ns | ×14.2 | -| `As ⋅ As` | 5.125 ns | 44.498 ns | ×8.7 | -| **Double contraction** | | | | -| `A ⊡ A` | 1.927 ns | 12.189 ns | ×6.3 | -| `As ⊡ As` | 1.927 ns | 12.187 ns | ×6.3 | -| `AA ⊡ A` | 6.087 ns | 78.554 ns | ×12.9 | -| `AA ⊡ AA` | 60.820 ns | 280.502 ns | ×4.6 | -| `AAs ⊡ AAs` | 22.104 ns | 281.003 ns | ×12.7 | -| `As ⊡ AAs ⊡ As` | 9.466 ns | 89.747 ns | ×9.5 | -| **Outer product** | | | | -| `a ⊗ a` | 2.167 ns | 32.447 ns | ×15.0 | -| `A ⊗ A` | 9.801 ns | 86.568 ns | ×8.8 | -| `As ⊗ As` | 4.311 ns | 87.830 ns | ×20.4 | -| **Other operations** | | | | -| `det(A)` | 1.924 ns | 177.134 ns | ×92.1 | -| `det(As)` | 1.924 ns | 182.831 ns | ×95.0 | -| `inv(A)` | 6.099 ns | 595.591 ns | ×97.7 | -| `inv(As)` | 4.587 ns | 635.858 ns | ×138.6 | -| `norm(a)` | 1.494 ns | 9.838 ns | ×6.6 | -| `norm(A)` | 1.990 ns | 16.752 ns | ×8.4 | -| `norm(As)` | 2.011 ns | 16.757 ns | ×8.3 | -| `norm(AA)` | 9.283 ns | 28.125 ns | ×3.0 | -| `norm(AAs)` | 5.422 ns | 28.134 ns | ×5.2 | -| `a × a` | 1.921 ns | 32.736 ns | ×17.0 | - - -The benchmarks are generated by -[`benchmark_doc.jl`](https://github.com/Ferrite-FEM/Tensors.jl/blob/master/benchmark/benchmark_doc.jl) -on the following system: - -``` -julia> versioninfo() - -Julia Version 0.6.0-pre.beta.297 -Commit 2a61131* (2017-04-24 23:57 UTC) -Platform Info: - OS: Linux (x86_64-linux-gnu) - CPU: Intel(R) Core(TM) i5-7600K CPU @ 3.80GHz - WORD_SIZE: 64 - BLAS: libopenblas (USE64BITINT NO_AFFINITY HASWELL) - LAPACK: libopenblas64_ - LIBM: libopenlibm - LLVM: libLLVM-3.9.1 (ORCJIT, broadwell) -``` +# Benchmarks + +Here are some benchmark timings for tensors in 3 dimensions. For comparison +the timings for the same operations using standard Julia `Array`s are also +presented. 
+ +In the table below, `a` denotes a vector, `A`, `As` denotes second order +non-symmetric and symmetric tensors and `AA`, `AAs` denotes fourth order +non-symmetric and symmetric tensors respectively. + +| Operation | `Tensor` | `Array` | speed-up | +|:-----------|---------:|--------:|---------:| +| **Single contraction** | | | | +| `a ⋅ a` | 1.241 ns | 9.795 ns | ×7.9 | +| `A ⋅ a` | 2.161 ns | 58.769 ns | ×27.2 | +| `A ⋅ A` | 3.117 ns | 44.395 ns | ×14.2 | +| `As ⋅ As` | 5.125 ns | 44.498 ns | ×8.7 | +| **Double contraction** | | | | +| `A ⊡ A` | 1.927 ns | 12.189 ns | ×6.3 | +| `As ⊡ As` | 1.927 ns | 12.187 ns | ×6.3 | +| `AA ⊡ A` | 6.087 ns | 78.554 ns | ×12.9 | +| `AA ⊡ AA` | 60.820 ns | 280.502 ns | ×4.6 | +| `AAs ⊡ AAs` | 22.104 ns | 281.003 ns | ×12.7 | +| `As ⊡ AAs ⊡ As` | 9.466 ns | 89.747 ns | ×9.5 | +| **Outer product** | | | | +| `a ⊗ a` | 2.167 ns | 32.447 ns | ×15.0 | +| `A ⊗ A` | 9.801 ns | 86.568 ns | ×8.8 | +| `As ⊗ As` | 4.311 ns | 87.830 ns | ×20.4 | +| **Other operations** | | | | +| `det(A)` | 1.924 ns | 177.134 ns | ×92.1 | +| `det(As)` | 1.924 ns | 182.831 ns | ×95.0 | +| `inv(A)` | 6.099 ns | 595.591 ns | ×97.7 | +| `inv(As)` | 4.587 ns | 635.858 ns | ×138.6 | +| `norm(a)` | 1.494 ns | 9.838 ns | ×6.6 | +| `norm(A)` | 1.990 ns | 16.752 ns | ×8.4 | +| `norm(As)` | 2.011 ns | 16.757 ns | ×8.3 | +| `norm(AA)` | 9.283 ns | 28.125 ns | ×3.0 | +| `norm(AAs)` | 5.422 ns | 28.134 ns | ×5.2 | +| `a × a` | 1.921 ns | 32.736 ns | ×17.0 | + + +The benchmarks are generated by +[`benchmark_doc.jl`](https://github.com/Ferrite-FEM/Tensors.jl/blob/master/benchmark/benchmark_doc.jl) +on the following system: + +``` +julia> versioninfo() + +Julia Version 0.6.0-pre.beta.297 +Commit 2a61131* (2017-04-24 23:57 UTC) +Platform Info: + OS: Linux (x86_64-linux-gnu) + CPU: Intel(R) Core(TM) i5-7600K CPU @ 3.80GHz + WORD_SIZE: 64 + BLAS: libopenblas (USE64BITINT NO_AFFINITY HASWELL) + LAPACK: libopenblas64_ + LIBM: libopenlibm + LLVM: libLLVM-3.9.1 (ORCJIT, broadwell) 
+``` diff --git a/docs/src/demos.md b/docs/src/demos.md index c32c095d..9742a4ae 100644 --- a/docs/src/demos.md +++ b/docs/src/demos.md @@ -1,141 +1,141 @@ -# Demos - -This section contain a few demos of applying `Tensors` to continuum mechanics. - -## Creating the linear elasticity tensor - -The linear elasticity tensor ``\mathbf{C}`` can be defined from the Lamé parameters ``\lambda`` and ``\mu`` by the expression - -```math -\mathbf{C}_{ijkl} = \lambda \delta_{ij}\delta_{kl} + \mu(\delta_{ik}\delta_{jl} + \delta_{il}\delta_{jk}), -``` - -where ``\delta_{ij} = 1`` if ``i = j`` otherwise ``0``. It can also be computed in terms of the Young's modulus ``E`` and Poisson's ratio ``\nu`` by the conversion formulas ``\lambda = E\nu / [(1 + \nu)(1 - 2\nu)]`` and ``\mu = E / [2(1 + \nu)]``. - -The code below creates the elasticity tensor for given parameters ``E`` and ``\nu`` and dimension `dim`. Note the similarity between the mathematical formula and the code. - -```julia -using Tensors -E = 200e9 -ν = 0.3 -dim = 2 -λ = E*ν / ((1 + ν) * (1 - 2ν)) -μ = E / (2(1 + ν)) -δ(i,j) = i == j ? 1.0 : 0.0 -f = (i,j,k,l) -> λ*δ(i,j)*δ(k,l) + μ*(δ(i,k)*δ(j,l) + δ(i,l)*δ(j,k)) - -C = SymmetricTensor{4, dim}(f) -``` - -## Nonlinear elasticity material - -For a deformation gradient - -```math -\mathbf{F} = \mathbf{I} + \mathbf{u} \otimes \nabla, -``` - -where ``\mathbf{u}`` is the deformation from the reference to the current configuration, the right Cauchy-Green deformation tensor is defined by - -```math -\mathbf{C} = \mathbf{F}^T \cdot \mathbf{F}. -``` - -The Second Piola Krichoff stress tensor ``\mathbf{S}`` is derived from the Helmholtz free energy ``\Psi`` by the relation - -```math -\mathbf{S} = 2 \frac{\partial \Psi}{\partial \mathbf{C}}. 
-``` - -We can define potential energy of the material as - -```math -\Psi(\mathbf{C}) = 1/2 \mu (\mathrm{tr}(\hat{\mathbf{C}}) - 3) + K_b(J-1)^2, -``` - -where ``\hat{\mathbf{C}} = \mathrm{det}(\mathbf{C})^{-1/3} \mathbf{C}`` and ``J = \det(\mathbf{F}) = \sqrt{\det(\mathbf{C})}``, and the shear and bulk modulus are given by ``\mu`` and ``K_b`` respectively. - -This free energy function can be implemented in `Tensors` as: - -```julia -function Ψ(C, μ, Kb) - detC = det(C) - J = sqrt(detC) - Ĉ = detC^(-1/3)*C - return 1/2*(μ * (tr(Ĉ)- 3) + Kb*(J-1)^2) -end -``` - -The analytical expression for the Second Piola Kirchoff tensor is - -```math -\mathbf{S} = \mu \det(\mathbf{C})^{-1/3}(\mathbf{I} - 1/3 \mathrm{tr}(\mathbf{C})\mathbf{C}^{-1}) + K_b(J-1)J\mathbf{C}^{-1} -``` - -which can be implemented by the function - -```julia -function S(C, μ, Kb) - I = one(C) - J = sqrt(det(C)) - invC = inv(C) - return μ * det(C)^(-1/3)*(I - 1/3*tr(C)*invC) + Kb*(J-1)*J*invC -end -``` - -### Automatic differentiation - -For some material models it can be cumbersome to compute the analytical expression for the Second Piola Kirchoff tensor. We can then instead use Automatic Differentiation (AD). Below is an example which computes the Second Piola Kirchoff tensor using AD and compares it to the analytical answer. - -```@meta -DocTestSetup = quote - using Random - Random.seed!(1234) - using Tensors - E = 200e9 - ν = 0.3 - dim = 2 - λ = E*ν / ((1 + ν) * (1 - 2ν)) - μ = E / (2(1 + ν)) - δ(i,j) = i == j ? 
1.0 : 0.0 - f = (i,j,k,l) -> λ*δ(i,j)*δ(k,l) + μ*(δ(i,k)*δ(j,l) + δ(i,l)*δ(j,k)) - - C = SymmetricTensor{4, dim}(f) - - function Ψ(C, μ, Kb) - detC = det(C) - J = sqrt(detC) - Ĉ = detC^(-1/3)*C - return 1/2*(μ * (tr(Ĉ)- 3) + Kb*(J-1)^2) - end - - function S(C, μ, Kb) - I = one(C) - J = sqrt(det(C)) - invC = inv(C) - return μ * det(C)^(-1/3)*(I - 1/3*tr(C)*invC) + Kb*(J-1)*J*invC - end -end -``` - -```jldoctest -julia> μ = 1e10; - -julia> Kb = 1.66e11; - -julia> F = one(Tensor{2,3}) + rand(Tensor{2,3}); - -julia> C = tdot(F); - -julia> S_AD = 2 * gradient(C -> Ψ(C, μ, Kb), C) -3×3 SymmetricTensor{2, 3, Float64, 6}: - 4.30534e11 -2.30282e11 -8.52861e10 - -2.30282e11 4.38793e11 -2.64481e11 - -8.52861e10 -2.64481e11 7.85515e11 - -julia> S(C, μ, Kb) -3×3 SymmetricTensor{2, 3, Float64, 6}: - 4.30534e11 -2.30282e11 -8.52861e10 - -2.30282e11 4.38793e11 -2.64481e11 - -8.52861e10 -2.64481e11 7.85515e11 -``` +# Demos + +This section contain a few demos of applying `Tensors` to continuum mechanics. + +## Creating the linear elasticity tensor + +The linear elasticity tensor ``\mathbf{C}`` can be defined from the Lamé parameters ``\lambda`` and ``\mu`` by the expression + +```math +\mathbf{C}_{ijkl} = \lambda \delta_{ij}\delta_{kl} + \mu(\delta_{ik}\delta_{jl} + \delta_{il}\delta_{jk}), +``` + +where ``\delta_{ij} = 1`` if ``i = j`` otherwise ``0``. It can also be computed in terms of the Young's modulus ``E`` and Poisson's ratio ``\nu`` by the conversion formulas ``\lambda = E\nu / [(1 + \nu)(1 - 2\nu)]`` and ``\mu = E / [2(1 + \nu)]``. + +The code below creates the elasticity tensor for given parameters ``E`` and ``\nu`` and dimension `dim`. Note the similarity between the mathematical formula and the code. + +```julia +using Tensors +E = 200e9 +ν = 0.3 +dim = 2 +λ = E*ν / ((1 + ν) * (1 - 2ν)) +μ = E / (2(1 + ν)) +δ(i,j) = i == j ? 
1.0 : 0.0 +f = (i,j,k,l) -> λ*δ(i,j)*δ(k,l) + μ*(δ(i,k)*δ(j,l) + δ(i,l)*δ(j,k)) + +C = SymmetricTensor{4, dim}(f) +``` + +## Nonlinear elasticity material + +For a deformation gradient + +```math +\mathbf{F} = \mathbf{I} + \mathbf{u} \otimes \nabla, +``` + +where ``\mathbf{u}`` is the deformation from the reference to the current configuration, the right Cauchy-Green deformation tensor is defined by + +```math +\mathbf{C} = \mathbf{F}^T \cdot \mathbf{F}. +``` + +The Second Piola Krichoff stress tensor ``\mathbf{S}`` is derived from the Helmholtz free energy ``\Psi`` by the relation + +```math +\mathbf{S} = 2 \frac{\partial \Psi}{\partial \mathbf{C}}. +``` + +We can define potential energy of the material as + +```math +\Psi(\mathbf{C}) = 1/2 \mu (\mathrm{tr}(\hat{\mathbf{C}}) - 3) + K_b(J-1)^2, +``` + +where ``\hat{\mathbf{C}} = \mathrm{det}(\mathbf{C})^{-1/3} \mathbf{C}`` and ``J = \det(\mathbf{F}) = \sqrt{\det(\mathbf{C})}``, and the shear and bulk modulus are given by ``\mu`` and ``K_b`` respectively. + +This free energy function can be implemented in `Tensors` as: + +```julia +function Ψ(C, μ, Kb) + detC = det(C) + J = sqrt(detC) + Ĉ = detC^(-1/3)*C + return 1/2*(μ * (tr(Ĉ)- 3) + Kb*(J-1)^2) +end +``` + +The analytical expression for the Second Piola Kirchoff tensor is + +```math +\mathbf{S} = \mu \det(\mathbf{C})^{-1/3}(\mathbf{I} - 1/3 \mathrm{tr}(\mathbf{C})\mathbf{C}^{-1}) + K_b(J-1)J\mathbf{C}^{-1} +``` + +which can be implemented by the function + +```julia +function S(C, μ, Kb) + I = one(C) + J = sqrt(det(C)) + invC = inv(C) + return μ * det(C)^(-1/3)*(I - 1/3*tr(C)*invC) + Kb*(J-1)*J*invC +end +``` + +### Automatic differentiation + +For some material models it can be cumbersome to compute the analytical expression for the Second Piola Kirchoff tensor. We can then instead use Automatic Differentiation (AD). Below is an example which computes the Second Piola Kirchoff tensor using AD and compares it to the analytical answer. 
+ +```@meta +DocTestSetup = quote + using Random + Random.seed!(1234) + using Tensors + E = 200e9 + ν = 0.3 + dim = 2 + λ = E*ν / ((1 + ν) * (1 - 2ν)) + μ = E / (2(1 + ν)) + δ(i,j) = i == j ? 1.0 : 0.0 + f = (i,j,k,l) -> λ*δ(i,j)*δ(k,l) + μ*(δ(i,k)*δ(j,l) + δ(i,l)*δ(j,k)) + + C = SymmetricTensor{4, dim}(f) + + function Ψ(C, μ, Kb) + detC = det(C) + J = sqrt(detC) + Ĉ = detC^(-1/3)*C + return 1/2*(μ * (tr(Ĉ)- 3) + Kb*(J-1)^2) + end + + function S(C, μ, Kb) + I = one(C) + J = sqrt(det(C)) + invC = inv(C) + return μ * det(C)^(-1/3)*(I - 1/3*tr(C)*invC) + Kb*(J-1)*J*invC + end +end +``` + +```jldoctest +julia> μ = 1e10; + +julia> Kb = 1.66e11; + +julia> F = one(Tensor{2,3}) + rand(Tensor{2,3}); + +julia> C = tdot(F); + +julia> S_AD = 2 * gradient(C -> Ψ(C, μ, Kb), C) +3×3 SymmetricTensor{2, 3, Float64, 6}: + 4.30534e11 -2.30282e11 -8.52861e10 + -2.30282e11 4.38793e11 -2.64481e11 + -8.52861e10 -2.64481e11 7.85515e11 + +julia> S(C, μ, Kb) +3×3 SymmetricTensor{2, 3, Float64, 6}: + 4.30534e11 -2.30282e11 -8.52861e10 + -2.30282e11 4.38793e11 -2.64481e11 + -8.52861e10 -2.64481e11 7.85515e11 +``` diff --git a/docs/src/index.md b/docs/src/index.md index ee7aa962..5b0f8624 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -1,48 +1,48 @@ -# Tensors - -*Efficient computations with symmetric and non-symmetric tensors in Julia.* - -## Introduction - -This Julia package provides fast operations with symmetric and non-symmetric tensors of order 1, 2 and 4. -The Tensors are allocated on the stack which means that there is no need to preallocate output results for performance. -Unicode infix operators are provided such that the tensor expression in the source code is similar to the one written with mathematical notation. -When possible, symmetry of tensors is exploited for better performance. -Supports Automatic Differentiation to easily compute first and second order derivatives of tensorial functions. 
- -## Installation - -`Tensors` is a registered package and so can be installed via - -```julia -Pkg.add("Tensors") -``` - -## Manual Outline - -```@contents -Pages = [ - "man/constructing_tensors.md", - "man/indexing.md", - "man/binary_operators.md", - "man/other_operators.md", - "man/storing_tensors.md", - "man/automatic_differentiation.md", -] -Depth = 1 -``` - -## Benchmarks - -```@contents -Pages = [ - "benchmarks.md"] -``` - -## Demos - -```@contents -Pages = [ - "demos.md"] -Depth = 1 -``` +# Tensors + +*Efficient computations with symmetric and non-symmetric tensors in Julia.* + +## Introduction + +This Julia package provides fast operations with symmetric and non-symmetric tensors of order 1, 2 and 4. +The Tensors are allocated on the stack which means that there is no need to preallocate output results for performance. +Unicode infix operators are provided such that the tensor expression in the source code is similar to the one written with mathematical notation. +When possible, symmetry of tensors is exploited for better performance. +Supports Automatic Differentiation to easily compute first and second order derivatives of tensorial functions. 
+ +## Installation + +`Tensors` is a registered package and so can be installed via + +```julia +Pkg.add("Tensors") +``` + +## Manual Outline + +```@contents +Pages = [ + "man/constructing_tensors.md", + "man/indexing.md", + "man/binary_operators.md", + "man/other_operators.md", + "man/storing_tensors.md", + "man/automatic_differentiation.md", +] +Depth = 1 +``` + +## Benchmarks + +```@contents +Pages = [ + "benchmarks.md"] +``` + +## Demos + +```@contents +Pages = [ + "demos.md"] +Depth = 1 +``` diff --git a/docs/src/man/automatic_differentiation.md b/docs/src/man/automatic_differentiation.md index a09c91b6..a045ee32 100644 --- a/docs/src/man/automatic_differentiation.md +++ b/docs/src/man/automatic_differentiation.md @@ -1,154 +1,154 @@ -```@meta -DocTestSetup = quote - using Random - Random.seed!(1234) - using Tensors -end -``` - -# Automatic Differentiation - -```@index -Pages = ["automatic_differentiation.md"] -``` - -`Tensors` supports forward mode automatic differentiation (AD) of tensorial functions to compute first order derivatives (gradients) and second order derivatives (Hessians). -It does this by exploiting the `Dual` number defined in `ForwardDiff.jl`. -While `ForwardDiff.jl` can itself be used to differentiate tensor functions it is a bit awkward because `ForwardDiff.jl` is written to work with standard Julia `Array`s. One therefore has to send the input argument as an `Array` to `ForwardDiff.jl`, convert it to a `Tensor` and then convert the output `Array` to a `Tensor` again. This can also be inefficient since these `Array`s are allocated on the heap so one needs to preallocate which can be annoying. - -Instead, it is simpler to use `Tensors` own AD API to do the differentiation. This does not require any conversions and everything will be stack allocated so there is no need to preallocate. - -The API for AD in `Tensors` is `gradient(f, A)` and `hessian(f, A)` where `f` is a function and `A` is a first or second order tensor. 
For `gradient` the function can return a scalar, vector (in case the input is a vector) or a second order tensor. For `hessian` the function should return a scalar. - -When evaluating the function with dual numbers, the value (value and gradient in the case of hessian) is obtained automatically, along with the gradient. To obtain the lower order results `gradient` and `hessian` accepts a third arguement, a `Symbol`. Note that the symbol is only used to dispatch to the correct function, and thus it can be any symbol. In the examples the symbol `:all` is used to obtain all the lower order derivatives and values. - -```@docs -gradient -hessian -divergence -curl -laplace -``` - -## Examples - -We here give a few examples of differentiating various functions and compare with the analytical solution. - -### Norm of a vector - -```math -f(\mathbf{x}) = |\mathbf{x}| \quad \Rightarrow \quad \partial f / \partial \mathbf{x} = \mathbf{x} / |\mathbf{x}| -``` - -```jldoctest -julia> x = rand(Vec{2}); - -julia> gradient(norm, x) -2-element Vec{2, Float64}: - 0.6103600560550116 - 0.7921241076829584 - -julia> x / norm(x) -2-element Vec{2, Float64}: - 0.6103600560550116 - 0.7921241076829584 -``` - -### Determinant of a second order symmetric tensor - -```math -f(\mathbf{A}) = \det \mathbf{A} \quad \Rightarrow \quad \partial f / \partial \mathbf{A} = \mathbf{A}^{-T} \det \mathbf{A} -``` - -```jldoctest -julia> A = rand(SymmetricTensor{2,2}); - -julia> gradient(det, A) -2×2 SymmetricTensor{2, 2, Float64, 3}: - 0.566237 -0.766797 - -0.766797 0.590845 - -julia> inv(A)' * det(A) -2×2 SymmetricTensor{2, 2, Float64, 3}: - 0.566237 -0.766797 - -0.766797 0.590845 -``` - -### Hessian of a quadratic potential - -```math -\psi(\mathbf{e}) = 1/2 \mathbf{e} : \mathsf{E} : \mathbf{e} \quad \Rightarrow \quad \partial \psi / (\partial \mathbf{e} \otimes \partial \mathbf{e}) = \mathsf{E}^\text{sym} -``` - -where ``\mathsf{E}^\text{sym}`` is the major symmetric part of ``\mathsf{E}``. 
- -```jldoctest -julia> E = rand(Tensor{4,2}); - -julia> ψ(ϵ) = 1/2 * ϵ ⊡ E ⊡ ϵ; - -julia> E_sym = hessian(ψ, rand(Tensor{2,2})); - -julia> norm(majorsymmetric(E) - E_sym) -0.0 -``` - -## Inserting a known derivative -When conditionals are used in a function evaluation, automatic differentiation -may yield the wrong result. Consider, the simplified example of the function -`f(x) = is_zero(x) ? zero(x) : sin(x)`. If evaluated at `x=0`, the returning -of `zero(x)` gives a zero derivative because `zero(x)` is constant, while the -correct value is 1. In such cases, it is possible to insert a known -derivative of a function which is part of a larger function to be -automatically differentiated. - -Another use case is when the analytical derivative can be computed much more -efficiently than the automatically differentiatiated derivative. - -```@docs -@implement_gradient -``` - -### Example -Lets consider the function ``h(\mathbf{f}(\mathbf{g}(\mathbf{x})))`` -where `h(x)=norm(x)`, `f(x)=x ⋅ x`, and `g(x)=dev(x)`. For `f(x)` we -then have the analytical derivative -```math -\frac{\partial f_{ij}}{\partial x_{kl}} = \delta_{ik} x_{lj} + x_{ik} \delta_{jl} -``` -which we can insert into our known analytical derivative using the - `@implement_gradient` macro. Below, we compare with the result when - the full derivative is calculated using automatic differentiation. 
- -```jldoctest -# Define functions -h(x) = norm(x) -f1(x) = x ⋅ x -f2(x) = f1(x) -g(x) = dev(x) - -# Define composed functions -cfun1(x) = h(f1(g(x))) -cfun2(x) = h(f2(g(x))) - -# Define known derivative -function df2dx(x::Tensor{2,dim}) where{dim} - println("Hello from df2dx") # Show that df2dx is called - fval = f2(x) - I2 = one(Tensor{2,dim}) - dfdx_val = otimesu(I2, transpose(x)) + otimesu(x, I2) - return fval, dfdx_val -end - -# Implement known derivative -@implement_gradient f2 df2dx - -# Calculate gradients -x = rand(Tensor{2,2}) - -gradient(cfun1, x) ≈ gradient(cfun2, x) - -# output -Hello from df2dx -true +```@meta +DocTestSetup = quote + using Random + Random.seed!(1234) + using Tensors +end +``` + +# Automatic Differentiation + +```@index +Pages = ["automatic_differentiation.md"] +``` + +`Tensors` supports forward mode automatic differentiation (AD) of tensorial functions to compute first order derivatives (gradients) and second order derivatives (Hessians). +It does this by exploiting the `Dual` number defined in `ForwardDiff.jl`. +While `ForwardDiff.jl` can itself be used to differentiate tensor functions it is a bit awkward because `ForwardDiff.jl` is written to work with standard Julia `Array`s. One therefore has to send the input argument as an `Array` to `ForwardDiff.jl`, convert it to a `Tensor` and then convert the output `Array` to a `Tensor` again. This can also be inefficient since these `Array`s are allocated on the heap so one needs to preallocate which can be annoying. + +Instead, it is simpler to use `Tensors` own AD API to do the differentiation. This does not require any conversions and everything will be stack allocated so there is no need to preallocate. + +The API for AD in `Tensors` is `gradient(f, A)` and `hessian(f, A)` where `f` is a function and `A` is a first or second order tensor. For `gradient` the function can return a scalar, vector (in case the input is a vector) or a second order tensor. 
For `hessian` the function should return a scalar.
+
+When evaluating the function with dual numbers, the value (value and gradient in the case of hessian) is obtained automatically, along with the gradient. To obtain the lower order results, `gradient` and `hessian` accept a third argument, a `Symbol`. Note that the symbol is only used to dispatch to the correct function, and thus it can be any symbol. In the examples the symbol `:all` is used to obtain all the lower order derivatives and values.
+
+```@docs
+gradient
+hessian
+divergence
+curl
+laplace
+```
+
+## Examples
+
+We here give a few examples of differentiating various functions and compare with the analytical solution.
+
+### Norm of a vector
+
+```math
+f(\mathbf{x}) = |\mathbf{x}| \quad \Rightarrow \quad \partial f / \partial \mathbf{x} = \mathbf{x} / |\mathbf{x}|
+```
+
+```jldoctest
+julia> x = rand(Vec{2});
+
+julia> gradient(norm, x)
+2-element Vec{2, Float64}:
+ 0.6103600560550116
+ 0.7921241076829584
+
+julia> x / norm(x)
+2-element Vec{2, Float64}:
+ 0.6103600560550116
+ 0.7921241076829584
+```
+
+### Determinant of a second order symmetric tensor
+
+```math
+f(\mathbf{A}) = \det \mathbf{A} \quad \Rightarrow \quad \partial f / \partial \mathbf{A} = \mathbf{A}^{-T} \det \mathbf{A}
+```
+
+```jldoctest
+julia> A = rand(SymmetricTensor{2,2});
+
+julia> gradient(det, A)
+2×2 SymmetricTensor{2, 2, Float64, 3}:
+  0.566237  -0.766797
+ -0.766797   0.590845
+
+julia> inv(A)' * det(A)
+2×2 SymmetricTensor{2, 2, Float64, 3}:
+  0.566237  -0.766797
+ -0.766797   0.590845
+```
+
+### Hessian of a quadratic potential
+
+```math
+\psi(\mathbf{e}) = 1/2 \mathbf{e} : \mathsf{E} : \mathbf{e} \quad \Rightarrow \quad \partial \psi / (\partial \mathbf{e} \otimes \partial \mathbf{e}) = \mathsf{E}^\text{sym}
+```
+
+where ``\mathsf{E}^\text{sym}`` is the major symmetric part of ``\mathsf{E}``. 
+
+```jldoctest
+julia> E = rand(Tensor{4,2});
+
+julia> ψ(ϵ) = 1/2 * ϵ ⊡ E ⊡ ϵ;
+
+julia> E_sym = hessian(ψ, rand(Tensor{2,2}));
+
+julia> norm(majorsymmetric(E) - E_sym)
+0.0
+```
+
+## Inserting a known derivative
+When conditionals are used in a function evaluation, automatic differentiation
+may yield the wrong result. Consider the simplified example of the function
+`f(x) = is_zero(x) ? zero(x) : sin(x)`. If evaluated at `x=0`, the returning
+of `zero(x)` gives a zero derivative because `zero(x)` is constant, while the
+correct value is 1. In such cases, it is possible to insert a known
+derivative of a function which is part of a larger function to be
+automatically differentiated.
+
+Another use case is when the analytical derivative can be computed much more
+efficiently than the automatically differentiated derivative.
+
+```@docs
+@implement_gradient
+```
+
+### Example
+Let's consider the function ``h(\mathbf{f}(\mathbf{g}(\mathbf{x})))``
+where `h(x)=norm(x)`, `f(x)=x ⋅ x`, and `g(x)=dev(x)`. For `f(x)` we
+then have the analytical derivative
+```math
+\frac{\partial f_{ij}}{\partial x_{kl}} = \delta_{ik} x_{lj} + x_{ik} \delta_{jl}
+```
+which we can insert as a known analytical derivative using the 
+ `@implement_gradient` macro. Below, we compare with the result when 
+ the full derivative is calculated using automatic differentiation. 
+ +```jldoctest +# Define functions +h(x) = norm(x) +f1(x) = x ⋅ x +f2(x) = f1(x) +g(x) = dev(x) + +# Define composed functions +cfun1(x) = h(f1(g(x))) +cfun2(x) = h(f2(g(x))) + +# Define known derivative +function df2dx(x::Tensor{2,dim}) where{dim} + println("Hello from df2dx") # Show that df2dx is called + fval = f2(x) + I2 = one(Tensor{2,dim}) + dfdx_val = otimesu(I2, transpose(x)) + otimesu(x, I2) + return fval, dfdx_val +end + +# Implement known derivative +@implement_gradient f2 df2dx + +# Calculate gradients +x = rand(Tensor{2,2}) + +gradient(cfun1, x) ≈ gradient(cfun2, x) + +# output +Hello from df2dx +true ``` \ No newline at end of file diff --git a/docs/src/man/binary_operators.md b/docs/src/man/binary_operators.md index 946ea2e9..5fa0219e 100644 --- a/docs/src/man/binary_operators.md +++ b/docs/src/man/binary_operators.md @@ -1,83 +1,83 @@ -```@meta -DocTestSetup = quote - using Random - Random.seed!(1234) - using Tensors -end -``` - -# Binary Operations - -```@index -Pages = ["binary_operators.md"] -``` - -## Dot product (single contraction) - -The dot product (or single contraction) between a tensor of order `n` and a tensor of order `m` is a tensor of order `m + n - 2`. For example, single contraction between two vectors ``\mathbf{b}`` and ``\mathbf{c}`` can be written as: - -```math -a = \mathbf{b} \cdot \mathbf{c} \Leftrightarrow a = b_i c_i -``` - -and single contraction between a second order tensor ``\mathbf{B}`` and a vector ``\mathbf{c}``: - -```math -\mathbf{a} = \mathbf{B} \cdot \mathbf{c} \Leftrightarrow a_i = B_{ij} c_j -``` - -```@docs -dot -``` - -## Double contraction - -A double contraction between two tensors contracts the two most inner indices. The result of a double contraction between a tensor of order `n` and a tensor of order `m` is a tensor of order `m + n - 4`. 
For example, double contraction between two second order tensors ``\mathbf{B}`` and ``\mathbf{C}`` can be written as: - -```math -a = \mathbf{B} : \mathbf{C} \Leftrightarrow a = B_{ij} C_{ij} -``` - -and double contraction between a fourth order tensor ``\mathsf{B}`` and a second order tensor ``\mathbf{C}``: - -```math -\mathbf{A} = \mathsf{B} : \mathbf{C} \Leftrightarrow A_{ij} = B_{ijkl} C_{kl} -``` - -```@docs -dcontract -``` - -## Tensor product (open product) - -The tensor product (or open product) between a tensor of order `n` and a tensor of order `m` is a tensor of order `m + n`. For example, open product between two vectors ``\mathbf{b}`` and ``\mathbf{c}`` can be written as: - -```math -\mathbf{A} = \mathbf{b} \otimes \mathbf{c} \Leftrightarrow A_{ij} = b_i c_j -``` - -and open product between two second order tensors ``\mathbf{B}`` and ``\mathbf{C}``: - -```math -\mathsf{A} = \mathbf{B} \otimes \mathbf{C} \Leftrightarrow A_{ijkl} = B_{ij} C_{kl} -``` - -```@docs -otimes -``` - -### Permuted tensor products - -Two commonly used permutations of the open product are the "upper" open product -(``\bar{\otimes}``) and "lower" open product (``\underline{\otimes}``) defined -between second order tensors ``\mathbf{B}`` and ``\mathbf{C}`` as - -```math -\mathsf{A} = \mathbf{B} \bar{\otimes} \mathbf{C} \Leftrightarrow A_{ijkl} = B_{ik} C_{jl}\\ -\mathsf{A} = \mathbf{B} \underline{\otimes} \mathbf{C} \Leftrightarrow A_{ijkl} = B_{il} C_{jk} -``` - -```@docs -otimesu -otimesl -``` +```@meta +DocTestSetup = quote + using Random + Random.seed!(1234) + using Tensors +end +``` + +# Binary Operations + +```@index +Pages = ["binary_operators.md"] +``` + +## Dot product (single contraction) + +The dot product (or single contraction) between a tensor of order `n` and a tensor of order `m` is a tensor of order `m + n - 2`. 
For example, single contraction between two vectors ``\mathbf{b}`` and ``\mathbf{c}`` can be written as: + +```math +a = \mathbf{b} \cdot \mathbf{c} \Leftrightarrow a = b_i c_i +``` + +and single contraction between a second order tensor ``\mathbf{B}`` and a vector ``\mathbf{c}``: + +```math +\mathbf{a} = \mathbf{B} \cdot \mathbf{c} \Leftrightarrow a_i = B_{ij} c_j +``` + +```@docs +dot +``` + +## Double contraction + +A double contraction between two tensors contracts the two most inner indices. The result of a double contraction between a tensor of order `n` and a tensor of order `m` is a tensor of order `m + n - 4`. For example, double contraction between two second order tensors ``\mathbf{B}`` and ``\mathbf{C}`` can be written as: + +```math +a = \mathbf{B} : \mathbf{C} \Leftrightarrow a = B_{ij} C_{ij} +``` + +and double contraction between a fourth order tensor ``\mathsf{B}`` and a second order tensor ``\mathbf{C}``: + +```math +\mathbf{A} = \mathsf{B} : \mathbf{C} \Leftrightarrow A_{ij} = B_{ijkl} C_{kl} +``` + +```@docs +dcontract +``` + +## Tensor product (open product) + +The tensor product (or open product) between a tensor of order `n` and a tensor of order `m` is a tensor of order `m + n`. 
For example, open product between two vectors ``\mathbf{b}`` and ``\mathbf{c}`` can be written as: + +```math +\mathbf{A} = \mathbf{b} \otimes \mathbf{c} \Leftrightarrow A_{ij} = b_i c_j +``` + +and open product between two second order tensors ``\mathbf{B}`` and ``\mathbf{C}``: + +```math +\mathsf{A} = \mathbf{B} \otimes \mathbf{C} \Leftrightarrow A_{ijkl} = B_{ij} C_{kl} +``` + +```@docs +otimes +``` + +### Permuted tensor products + +Two commonly used permutations of the open product are the "upper" open product +(``\bar{\otimes}``) and "lower" open product (``\underline{\otimes}``) defined +between second order tensors ``\mathbf{B}`` and ``\mathbf{C}`` as + +```math +\mathsf{A} = \mathbf{B} \bar{\otimes} \mathbf{C} \Leftrightarrow A_{ijkl} = B_{ik} C_{jl}\\ +\mathsf{A} = \mathbf{B} \underline{\otimes} \mathbf{C} \Leftrightarrow A_{ijkl} = B_{il} C_{jk} +``` + +```@docs +otimesu +otimesl +``` diff --git a/docs/src/man/constructing_tensors.md b/docs/src/man/constructing_tensors.md index f6337a67..cdec0802 100644 --- a/docs/src/man/constructing_tensors.md +++ b/docs/src/man/constructing_tensors.md @@ -1,224 +1,224 @@ -```@meta -DocTestSetup = quote - using Random - Random.seed!(1234) - using Tensors -end -``` - -# Constructing tensors - -Tensors can be created in multiple ways but they usually include running a function on tensor types of which there are two kinds, `Tensor{order, dim, T}` for non-symmetric tensors and `SymmetricTensor{order, dim, T}` for symmetric tensors. -The parameter `order` is an integer of value 1, 2 or 4, excluding 1 for symmetric tensors. The second parameter `dim` is an integer which corresponds to the dimension of the tensor and can be 1, 2 or 3. The last parameter `T` is the number type that the tensors contain, i.e. `Float64` or `Float32`. 
- -```@docs -Tensors.Tensor -Tensors.SymmetricTensor -``` - -## [Zero tensors](@id zero_tensors) - -A tensor with only zeros is created using the function `zero`, applied to the type of tensor that should be created: - -```jldoctest -julia> zero(Tensor{1, 2}) -2-element Vec{2, Float64}: - 0.0 - 0.0 -``` - -By default, a tensor of `Float64`s is created, but by explicitly giving the `T` parameter this can be changed: - -```jldoctest -julia> zero(SymmetricTensor{4, 2, Float32}) -2×2×2×2 SymmetricTensor{4, 2, Float32, 9}: -[:, :, 1, 1] = - 0.0 0.0 - 0.0 0.0 - -[:, :, 2, 1] = - 0.0 0.0 - 0.0 0.0 - -[:, :, 1, 2] = - 0.0 0.0 - 0.0 0.0 - -[:, :, 2, 2] = - 0.0 0.0 - 0.0 0.0 -``` - -A Julia `Array` with zeroed tensors can be created with `zeros`, with the tensor type and dimensions of the array as arguments: - -```jldoctest -julia> zeros(Tensor{2,2}, 2, 3) -2×3 Matrix{Tensor{2, 2, Float64, 4}}: - [0.0 0.0; 0.0 0.0] [0.0 0.0; 0.0 0.0] [0.0 0.0; 0.0 0.0] - [0.0 0.0; 0.0 0.0] [0.0 0.0; 0.0 0.0] [0.0 0.0; 0.0 0.0] -``` - -## Constant tensors - -A tensor filled with ones is created using the function `ones`, applied to the type of tensor that should be created: - -```jldoctest -julia> ones(Tensor{2,2}) -2×2 Tensor{2, 2, Float64, 4}: - 1.0 1.0 - 1.0 1.0 -``` - -By default, a tensor of `Float64`s is created, but by explicitly giving the `T` parameter this can be changed, like for [zero](@ref zero_tensors). - -!!! note - The function `ones` has double meaning: it can create a tensor filled with ones - (as described above) or create a Julia `Array` with [identity tensors](@ref identity_tensors). 
- Thus, to create an `Array` with tensors filled with ones, instead use array comprehension: - - ```jldoctest - julia> [ones(Tensor{2,2}) for i in 1:2, j in 1:3] - 2×3 Matrix{Tensor{2, 2, Float64, 4}}: - [1.0 1.0; 1.0 1.0] [1.0 1.0; 1.0 1.0] [1.0 1.0; 1.0 1.0] - [1.0 1.0; 1.0 1.0] [1.0 1.0; 1.0 1.0] [1.0 1.0; 1.0 1.0] - ``` - - -## Random tensors - -A tensor with random numbers is created using the function `rand`, applied to the type of tensor that should be created: - -```jldoctest -julia> rand(Tensor{2, 3}) -3×3 Tensor{2, 3, Float64, 9}: - 0.590845 0.460085 0.200586 - 0.766797 0.794026 0.298614 - 0.566237 0.854147 0.246837 -``` - -By specifying the type, `T`, a tensor of different type can be obtained: - -```jldoctest -julia> rand(SymmetricTensor{2,3,Float32}) -3×3 SymmetricTensor{2, 3, Float32, 6}: - 0.0107703 0.305865 0.2082 - 0.305865 0.405684 0.257278 - 0.2082 0.257278 0.958491 -``` - -## [Identity tensors](@id identity_tensors) - -Identity tensors can be created for orders 2 and 4. The components of the second order identity tensor ``\mathbf{I}`` are defined as ``I_{ij} = \delta_{ij}``, where ``\delta_{ij}`` is the Kronecker delta. The fourth order identity tensor ``\mathsf{I}`` is the resulting tensor from taking the derivative of a second order tensor ``\mathbf{A}`` with itself: - -```math -\mathsf{I} = \frac{\partial \mathbf{A}}{\partial \mathbf{A}} \Leftrightarrow I_{ijkl} = \frac{\partial A_{ij}}{\partial A_{kl}} = \delta_{ik} \delta_{jl}. -``` - -The symmetric fourth order tensor, ``\mathsf{I}^\text{sym}``, is the resulting tensor from taking the derivative of a symmetric second order tensor ``\mathbf{A}^\text{sym}`` with itself: - -```math -\mathsf{I}^\text{sym} = \frac{\partial \mathbf{A}^\text{sym}}{\partial \mathbf{A}^\text{sym}} \Leftrightarrow I^\text{sym}_{ijkl} = \frac{\partial A^\text{sym}_{ij}}{\partial A^\text{sym}_{kl}} = \frac{1}{2} (\delta_{ik} \delta_{jl} + \delta_{il} \delta_{jk}). 
-``` - -Identity tensors are created using the function `one`, applied to the type of tensor that should be created: - -```jldoctest -julia> one(SymmetricTensor{2, 2}) -2×2 SymmetricTensor{2, 2, Float64, 3}: - 1.0 0.0 - 0.0 1.0 -``` - -A Julia `Array` with identity tensors can be created with `ones`, with the tensor type and dimensions of the array as arguments: - -```jldoctest -julia> ones(Tensor{2,2}, 2, 2) -2×2 Matrix{Tensor{2, 2, Float64, 4}}: - [1.0 0.0; 0.0 1.0] [1.0 0.0; 0.0 1.0] - [1.0 0.0; 0.0 1.0] [1.0 0.0; 0.0 1.0] -``` - -## Base vectors - -```@docs -basevec -``` - -## From arrays / tuples - -Tensors can also be created from a tuple or an array with the same number of elements as the number of independent indices in the tensor. For example, a first order tensor (vector) in two dimensions is here created from a vector of length two: - -```jldoctest -julia> Tensor{1,2}([1.0,2.0]) -2-element Vec{2, Float64}: - 1.0 - 2.0 -``` - -Below, a second order symmetric tensor in two dimensions is created from a tuple. Since the number of independent indices in this tensor is three, the length of the tuple is also three. For symmetric tensors, the order of the numbers in the input tuple is column by column, starting at the diagonal. - -```jldoctest -julia> SymmetricTensor{2,2}((1.0,2.0,3.0)) -2×2 SymmetricTensor{2, 2, Float64, 3}: - 1.0 2.0 - 2.0 3.0 -``` - -## [From a function](@id function_index) - -A tensor can be created from a function `f(indices...) -> v` which maps a set of indices to a value. The number of arguments of the function should be equal to the order of the tensor. - -```jldoctest -julia> SymmetricTensor{2,2,Float64}((i,j) -> i + j) -2×2 SymmetricTensor{2, 2, Float64, 3}: - 2.0 3.0 - 3.0 4.0 -``` - -For symmetric tensors, the function is only called for the lower triangular part. 
- -## Diagonal tensors - -A diagonal second order tensor can be created by either giving a number or a vector that should appear on the diagonal: - -```jldoctest -julia> diagm(Tensor{2,2}, 2.0) -2×2 Tensor{2, 2, Float64, 4}: - 2.0 0.0 - 0.0 2.0 - -julia> diagm(SymmetricTensor{2,3}, [1.0, 2.0, 3.0]) -3×3 SymmetricTensor{2, 3, Float64, 6}: - 1.0 0.0 0.0 - 0.0 2.0 0.0 - 0.0 0.0 3.0 -``` - -## Converting to tensors - -Sometimes it is necessary to convert between standard Julia `Array`'s and `Tensor`'s. When the number type is a bits type (like for floats or integers) this is conveniently done by the `reinterpret` function. For example, a `2×5` Julia `Array` can be translated to a vector of `Vec{2}` with the -following code - -```jldoctest fromarray -julia> data = rand(2, 5) -2×5 Matrix{Float64}: - 0.590845 0.566237 0.794026 0.200586 0.246837 - 0.766797 0.460085 0.854147 0.298614 0.579672 - -julia> tensor_data = reinterpret(Vec{2, Float64}, vec(data)) -5-element reinterpret(Vec{2, Float64}, ::Vector{Float64}): - [0.5908446386657102, 0.7667970365022592] - [0.5662374165061859, 0.4600853424625171] - [0.7940257103317943, 0.8541465903790502] - [0.20058603493384108, 0.2986142783434118] - [0.24683718661000897, 0.5796722333690416] -``` - -The data can also be reinterpreted back to a Julia `Array` - -```jldoctest fromarray -julia> data = reshape(reinterpret(Float64, tensor_data), (2, 5)) -2×5 Matrix{Float64}: - 0.590845 0.566237 0.794026 0.200586 0.246837 - 0.766797 0.460085 0.854147 0.298614 0.579672 -``` +```@meta +DocTestSetup = quote + using Random + Random.seed!(1234) + using Tensors +end +``` + +# Constructing tensors + +Tensors can be created in multiple ways but they usually include running a function on tensor types of which there are two kinds, `Tensor{order, dim, T}` for non-symmetric tensors and `SymmetricTensor{order, dim, T}` for symmetric tensors. +The parameter `order` is an integer of value 1, 2 or 4, excluding 1 for symmetric tensors. 
The second parameter `dim` is an integer which corresponds to the dimension of the tensor and can be 1, 2 or 3. The last parameter `T` is the number type that the tensors contain, i.e. `Float64` or `Float32`. + +```@docs +Tensors.Tensor +Tensors.SymmetricTensor +``` + +## [Zero tensors](@id zero_tensors) + +A tensor with only zeros is created using the function `zero`, applied to the type of tensor that should be created: + +```jldoctest +julia> zero(Tensor{1, 2}) +2-element Vec{2, Float64}: + 0.0 + 0.0 +``` + +By default, a tensor of `Float64`s is created, but by explicitly giving the `T` parameter this can be changed: + +```jldoctest +julia> zero(SymmetricTensor{4, 2, Float32}) +2×2×2×2 SymmetricTensor{4, 2, Float32, 9}: +[:, :, 1, 1] = + 0.0 0.0 + 0.0 0.0 + +[:, :, 2, 1] = + 0.0 0.0 + 0.0 0.0 + +[:, :, 1, 2] = + 0.0 0.0 + 0.0 0.0 + +[:, :, 2, 2] = + 0.0 0.0 + 0.0 0.0 +``` + +A Julia `Array` with zeroed tensors can be created with `zeros`, with the tensor type and dimensions of the array as arguments: + +```jldoctest +julia> zeros(Tensor{2,2}, 2, 3) +2×3 Matrix{Tensor{2, 2, Float64, 4}}: + [0.0 0.0; 0.0 0.0] [0.0 0.0; 0.0 0.0] [0.0 0.0; 0.0 0.0] + [0.0 0.0; 0.0 0.0] [0.0 0.0; 0.0 0.0] [0.0 0.0; 0.0 0.0] +``` + +## Constant tensors + +A tensor filled with ones is created using the function `ones`, applied to the type of tensor that should be created: + +```jldoctest +julia> ones(Tensor{2,2}) +2×2 Tensor{2, 2, Float64, 4}: + 1.0 1.0 + 1.0 1.0 +``` + +By default, a tensor of `Float64`s is created, but by explicitly giving the `T` parameter this can be changed, like for [zero](@ref zero_tensors). + +!!! note + The function `ones` has double meaning: it can create a tensor filled with ones + (as described above) or create a Julia `Array` with [identity tensors](@ref identity_tensors). 
+ Thus, to create an `Array` with tensors filled with ones, instead use array comprehension: + + ```jldoctest + julia> [ones(Tensor{2,2}) for i in 1:2, j in 1:3] + 2×3 Matrix{Tensor{2, 2, Float64, 4}}: + [1.0 1.0; 1.0 1.0] [1.0 1.0; 1.0 1.0] [1.0 1.0; 1.0 1.0] + [1.0 1.0; 1.0 1.0] [1.0 1.0; 1.0 1.0] [1.0 1.0; 1.0 1.0] + ``` + + +## Random tensors + +A tensor with random numbers is created using the function `rand`, applied to the type of tensor that should be created: + +```jldoctest +julia> rand(Tensor{2, 3}) +3×3 Tensor{2, 3, Float64, 9}: + 0.590845 0.460085 0.200586 + 0.766797 0.794026 0.298614 + 0.566237 0.854147 0.246837 +``` + +By specifying the type, `T`, a tensor of different type can be obtained: + +```jldoctest +julia> rand(SymmetricTensor{2,3,Float32}) +3×3 SymmetricTensor{2, 3, Float32, 6}: + 0.0107703 0.305865 0.2082 + 0.305865 0.405684 0.257278 + 0.2082 0.257278 0.958491 +``` + +## [Identity tensors](@id identity_tensors) + +Identity tensors can be created for orders 2 and 4. The components of the second order identity tensor ``\mathbf{I}`` are defined as ``I_{ij} = \delta_{ij}``, where ``\delta_{ij}`` is the Kronecker delta. The fourth order identity tensor ``\mathsf{I}`` is the resulting tensor from taking the derivative of a second order tensor ``\mathbf{A}`` with itself: + +```math +\mathsf{I} = \frac{\partial \mathbf{A}}{\partial \mathbf{A}} \Leftrightarrow I_{ijkl} = \frac{\partial A_{ij}}{\partial A_{kl}} = \delta_{ik} \delta_{jl}. +``` + +The symmetric fourth order tensor, ``\mathsf{I}^\text{sym}``, is the resulting tensor from taking the derivative of a symmetric second order tensor ``\mathbf{A}^\text{sym}`` with itself: + +```math +\mathsf{I}^\text{sym} = \frac{\partial \mathbf{A}^\text{sym}}{\partial \mathbf{A}^\text{sym}} \Leftrightarrow I^\text{sym}_{ijkl} = \frac{\partial A^\text{sym}_{ij}}{\partial A^\text{sym}_{kl}} = \frac{1}{2} (\delta_{ik} \delta_{jl} + \delta_{il} \delta_{jk}). 
+``` + +Identity tensors are created using the function `one`, applied to the type of tensor that should be created: + +```jldoctest +julia> one(SymmetricTensor{2, 2}) +2×2 SymmetricTensor{2, 2, Float64, 3}: + 1.0 0.0 + 0.0 1.0 +``` + +A Julia `Array` with identity tensors can be created with `ones`, with the tensor type and dimensions of the array as arguments: + +```jldoctest +julia> ones(Tensor{2,2}, 2, 2) +2×2 Matrix{Tensor{2, 2, Float64, 4}}: + [1.0 0.0; 0.0 1.0] [1.0 0.0; 0.0 1.0] + [1.0 0.0; 0.0 1.0] [1.0 0.0; 0.0 1.0] +``` + +## Base vectors + +```@docs +basevec +``` + +## From arrays / tuples + +Tensors can also be created from a tuple or an array with the same number of elements as the number of independent indices in the tensor. For example, a first order tensor (vector) in two dimensions is here created from a vector of length two: + +```jldoctest +julia> Tensor{1,2}([1.0,2.0]) +2-element Vec{2, Float64}: + 1.0 + 2.0 +``` + +Below, a second order symmetric tensor in two dimensions is created from a tuple. Since the number of independent indices in this tensor is three, the length of the tuple is also three. For symmetric tensors, the order of the numbers in the input tuple is column by column, starting at the diagonal. + +```jldoctest +julia> SymmetricTensor{2,2}((1.0,2.0,3.0)) +2×2 SymmetricTensor{2, 2, Float64, 3}: + 1.0 2.0 + 2.0 3.0 +``` + +## [From a function](@id function_index) + +A tensor can be created from a function `f(indices...) -> v` which maps a set of indices to a value. The number of arguments of the function should be equal to the order of the tensor. + +```jldoctest +julia> SymmetricTensor{2,2,Float64}((i,j) -> i + j) +2×2 SymmetricTensor{2, 2, Float64, 3}: + 2.0 3.0 + 3.0 4.0 +``` + +For symmetric tensors, the function is only called for the lower triangular part. 
+ +## Diagonal tensors + +A diagonal second order tensor can be created by either giving a number or a vector that should appear on the diagonal: + +```jldoctest +julia> diagm(Tensor{2,2}, 2.0) +2×2 Tensor{2, 2, Float64, 4}: + 2.0 0.0 + 0.0 2.0 + +julia> diagm(SymmetricTensor{2,3}, [1.0, 2.0, 3.0]) +3×3 SymmetricTensor{2, 3, Float64, 6}: + 1.0 0.0 0.0 + 0.0 2.0 0.0 + 0.0 0.0 3.0 +``` + +## Converting to tensors + +Sometimes it is necessary to convert between standard Julia `Array`'s and `Tensor`'s. When the number type is a bits type (like for floats or integers) this is conveniently done by the `reinterpret` function. For example, a `2×5` Julia `Array` can be translated to a vector of `Vec{2}` with the +following code + +```jldoctest fromarray +julia> data = rand(2, 5) +2×5 Matrix{Float64}: + 0.590845 0.566237 0.794026 0.200586 0.246837 + 0.766797 0.460085 0.854147 0.298614 0.579672 + +julia> tensor_data = reinterpret(Vec{2, Float64}, vec(data)) +5-element reinterpret(Vec{2, Float64}, ::Vector{Float64}): + [0.5908446386657102, 0.7667970365022592] + [0.5662374165061859, 0.4600853424625171] + [0.7940257103317943, 0.8541465903790502] + [0.20058603493384108, 0.2986142783434118] + [0.24683718661000897, 0.5796722333690416] +``` + +The data can also be reinterpreted back to a Julia `Array` + +```jldoctest fromarray +julia> data = reshape(reinterpret(Float64, tensor_data), (2, 5)) +2×5 Matrix{Float64}: + 0.590845 0.566237 0.794026 0.200586 0.246837 + 0.766797 0.460085 0.854147 0.298614 0.579672 +``` diff --git a/docs/src/man/indexing.md b/docs/src/man/indexing.md index f9e43a45..cc92d5cf 100644 --- a/docs/src/man/indexing.md +++ b/docs/src/man/indexing.md @@ -1,57 +1,57 @@ -```@meta -DocTestSetup = quote - using Random - Random.seed!(1234) - using Tensors -end -``` - -# Indexing - -Indexing into a `(Symmetric)Tensor{dim, order}` is performed like for an `Array` of dimension `order`. 
- -```jldoctest -julia> A = rand(Tensor{2, 2}); - -julia> A[1, 2] -0.5662374165061859 - -julia> B = rand(SymmetricTensor{4, 2}); - -julia> B[1, 2, 1, 2] -0.24683718661000897 -``` - -Slicing will produce a `Tensor` of lower order. - -```jldoctest -julia> A = rand(Tensor{2, 2}); - -julia> A[:, 1] -2-element Vec{2, Float64}: - 0.5908446386657102 - 0.7667970365022592 -``` - -Since `Tensor`s are immutable there is no `setindex!` function defined on them. Instead, use the functionality to create tensors from functions as described [here](@ref function_index). As an example, this sets the `[1,2]` index on a tensor to one and the rest to zero: - -```jldoctest -julia> Tensor{2, 2}((i,j) -> i == 1 && j == 2 ? 1.0 : 0.0) -2×2 Tensor{2, 2, Float64, 4}: - 0.0 1.0 - 0.0 0.0 -``` - -For symmetric tensors, note that you should only set the lower triangular part of the tensor: - -```jldoctest -julia> SymmetricTensor{2, 2}((i,j) -> i == 1 && j == 2 ? 1.0 : 0.0) -2×2 SymmetricTensor{2, 2, Float64, 3}: - 0.0 0.0 - 0.0 0.0 - -julia> SymmetricTensor{2, 2}((i,j) -> i == 2 && j == 1 ? 1.0 : 0.0) -2×2 SymmetricTensor{2, 2, Float64, 3}: - 0.0 1.0 - 1.0 0.0 -``` +```@meta +DocTestSetup = quote + using Random + Random.seed!(1234) + using Tensors +end +``` + +# Indexing + +Indexing into a `(Symmetric)Tensor{dim, order}` is performed like for an `Array` of dimension `order`. + +```jldoctest +julia> A = rand(Tensor{2, 2}); + +julia> A[1, 2] +0.5662374165061859 + +julia> B = rand(SymmetricTensor{4, 2}); + +julia> B[1, 2, 1, 2] +0.24683718661000897 +``` + +Slicing will produce a `Tensor` of lower order. + +```jldoctest +julia> A = rand(Tensor{2, 2}); + +julia> A[:, 1] +2-element Vec{2, Float64}: + 0.5908446386657102 + 0.7667970365022592 +``` + +Since `Tensor`s are immutable there is no `setindex!` function defined on them. Instead, use the functionality to create tensors from functions as described [here](@ref function_index). 
As an example, this sets the `[1,2]` index on a tensor to one and the rest to zero: + +```jldoctest +julia> Tensor{2, 2}((i,j) -> i == 1 && j == 2 ? 1.0 : 0.0) +2×2 Tensor{2, 2, Float64, 4}: + 0.0 1.0 + 0.0 0.0 +``` + +For symmetric tensors, note that you should only set the lower triangular part of the tensor: + +```jldoctest +julia> SymmetricTensor{2, 2}((i,j) -> i == 1 && j == 2 ? 1.0 : 0.0) +2×2 SymmetricTensor{2, 2, Float64, 3}: + 0.0 0.0 + 0.0 0.0 + +julia> SymmetricTensor{2, 2}((i,j) -> i == 2 && j == 1 ? 1.0 : 0.0) +2×2 SymmetricTensor{2, 2, Float64, 3}: + 0.0 1.0 + 1.0 0.0 +``` diff --git a/docs/src/man/other_operators.md b/docs/src/man/other_operators.md index bf1f8377..25364207 100644 --- a/docs/src/man/other_operators.md +++ b/docs/src/man/other_operators.md @@ -1,299 +1,299 @@ -```@meta -DocTestSetup = quote - using Random - Random.seed!(1234) - using Tensors -end -``` - -# Other operators - -```@index -Pages = ["other_operators.md"] -``` - -## Transpose-dot - -The dot product between the transpose of a tensor with itself. Results in a symmetric tensor. - -```math -\mathbf{A} = \mathbf{B}^\text{T} \cdot \mathbf{B} \Leftrightarrow A_{ij} = B_{ki}^\text{T} B_{kj} = B_{ik} B_{kj} -``` - -```math -\mathbf{A} = \mathbf{B} \cdot \mathbf{B}^\text{T} \Leftrightarrow A_{ij} = B_{ik} B_{jk}^\text{T} = B_{ik} B_{kj} -``` - -```@docs -Tensors.tdot -Tensors.dott -``` - -## Norm - -The (2)-norm of a tensor is defined for a vector, second order tensor and fourth order tensor as - -```math -\|\mathbf{a}\| = \sqrt{\mathbf{a} \cdot \mathbf{a}} \Leftrightarrow \|a_i\| = \sqrt{a_i a_i}, -``` - -```math -\|\mathbf{A}\| = \sqrt{\mathbf{A} : \mathbf{A}} \Leftrightarrow \|A_{ij}\| = \sqrt{A_{ij} A_{ij}}, -``` - -```math -\|\mathsf{A}\| = \sqrt{\mathsf{A} :: \mathsf{A}} \Leftrightarrow \|A_{ijkl}\| = \sqrt{A_{ijkl} A_{ijkl}}. -``` - -```@docs -Tensors.norm -``` - -## Trace - -The trace for a second order tensor is defined as the sum of the diagonal elements. 
This can be written as - -```math -\text{tr}(\mathbf{A}) = \mathbf{I} : \mathbf{A} \Leftrightarrow \text{tr}(A_{ij}) = A_{ii}. -``` - -```@docs -Tensors.tr -``` - -## Determinant - -Determinant for a second order tensor. - -```@docs -Tensors.det -``` - -## Inverse - -Inverse of a second order tensor such that - -```math -\mathbf{A}^{-1} \cdot \mathbf{A} = \mathbf{I} -``` - -where ``\mathbf{I}`` is the second order identitiy tensor. - -```@docs -Tensors.inv -``` - -## Transpose - -Transpose of tensors is defined by changing the order of the tensor's "legs". The transpose of a vector/symmetric tensor is the vector/tensor itself. The transpose of a second order tensor can be written as: - -```math -A_{ij}^\text{T} = A_{ji} -``` - -and for a fourth order tensor the minor transpose can be written as - -```math -A_{ijkl}^\text{t} = A_{jilk} -``` - -and the major transpose as - -```math -A_{ijkl}^\text{T} = A_{klij}. -``` - -```@docs -Tensors.transpose -Tensors.minortranspose -Tensors.majortranspose -``` - -## Symmetric - -The symmetric part of a second order tensor is defined by: - -```math -\mathbf{A}^\text{sym} = \frac{1}{2}(\mathbf{A} + \mathbf{A}^\text{T}) \Leftrightarrow A_{ij}^\text{sym} = \frac{1}{2}(A_{ij} + A_{ji}), -``` - -The major symmetric part of a fourth order tensor is defined by -```math -\mathsf{A}^\text{majsym} = \frac{1}{2}(\mathsf{A} + \mathsf{A}^\text{T}) \Leftrightarrow A_{ijkl}^\text{majsym} = \frac{1}{2}(A_{ijkl} + A_{klij}). -``` - -The minor symmetric part of a fourth order tensor is defined by -```math -A_{ijkl}^\text{minsym} = \frac{1}{4}(A_{ijkl} + A_{ijlk} + A_{jikl} + A_{jilk}). -``` - -```@docs -Tensors.symmetric -Tensors.minorsymmetric -Tensors.majorsymmetric -``` - -## Skew symmetric - -The skew symmetric part of a second order tensor is defined by - -```math -\mathbf{A}^\text{skw} = \frac{1}{2}(\mathbf{A} - \mathbf{A}^\text{T}) \Leftrightarrow A^\text{skw}_{ij} = \frac{1}{2}(A_{ij} - A_{ji}). 
-``` - -The skew symmetric part of a symmetric tensor is zero. - -```@docs -Tensors.skew -``` - -## Deviatoric tensor - -The deviatoric part of a second order tensor is defined by - -```math -\mathbf{A}^\text{dev} = \mathbf{A} - \frac{1}{3} \mathrm{tr}[\mathbf{A}] \mathbf{I} \Leftrightarrow A_{ij}^\text{dev} = A_{ij} - \frac{1}{3}A_{kk}\delta_{ij}. -``` - -```@docs -Tensors.dev -``` - -## Volumetric tensor - -The volumetric part of a second order tensor is defined by - -```math -\mathbf{A}^\text{vol} = \frac{1}{3} \mathrm{tr}[\mathbf{A}] \mathbf{I} \Leftrightarrow A_{ij}^\text{vol} = \frac{1}{3}A_{kk}\delta_{ij}. -``` - -```@docs -Tensors.vol -``` - -## Cross product - -The cross product between two vectors is defined as - -```math -\mathbf{a} = \mathbf{b} \times \mathbf{c} \Leftrightarrow a_i = \epsilon_{ijk} b_j c_k -``` - -```@docs -Tensors.cross -``` - -## Eigenvalues and eigenvectors - -The eigenvalues and eigenvectors of a (symmetric) second order tensor, ``\mathbf{A}`` can be solved from the eigenvalue problem - -```math -\mathbf{A} \cdot \mathbf{v}_i = \lambda_i \mathbf{v}_i \qquad i = 1, \dots, \text{dim} -``` - -where ``\lambda_i`` are the eigenvalues and ``\mathbf{v}_i`` are the corresponding eigenvectors. -For a symmetric fourth order tensor, ``\mathsf{A}`` the second order eigentensors and eigenvalues -can be solved from - -```math -\mathsf{A} : \mathbf{V}_i = \lambda_i \mathbf{V}_i \qquad i = 1, \dots, \text{dim} -``` - -where ``\lambda_i`` are the eigenvalues and ``\mathbf{V}_i`` the corresponding eigentensors. - -```@docs -Tensors.eigen -Tensors.eigvals -Tensors.eigvecs -``` - -## Tensor square root - -Square root of a symmetric positive definite second order tensor ``S``, -defined such that - -```math -\sqrt{\mathbf{S}} \cdot \sqrt{\mathbf{S}} = S. 
-``` - -```@docs -Tensors.sqrt -``` - -## Rotations - -```@docs -Tensors.rotate -Tensors.rotation_tensor -``` - -## Special operations - -For computing a special dot product between two vectors ``\mathbf{a}`` and ``\mathbf{b}`` with a fourth order symmetric tensor ``\mathbf{C}`` such that ``a_k C_{ikjl} b_l`` there is `dotdot(a, C, b)`. This function is useful because it is the expression for the tangent matrix in continuum mechanics when the displacements are approximated by scalar shape functions. - -```@docs -Tensors.dotdot -``` - -## Voigt format - -For some operations it is convenient to easily switch to the so called "Voigt"-format. -For example when solving a local problem in a plasticity model. To simplify the conversion -between tensors and Voigt format, see [`tovoigt`](@ref), [`tovoigt!`](@ref) and -[`fromvoigt`](@ref) documented below. Care must be exercised when combined with -differentiation, see [Differentiation of Voigt format](@ref) further down. - -```@docs -Tensors.tovoigt -Tensors.tovoigt! -Tensors.fromvoigt -``` - -### Differentiation of Voigt format - -Differentiating with a Voigt representation of a symmetric tensor may lead to incorrect -results when converted back to tensors. -The `tomandel`, `tomandel!`, and `frommandel` versions of -[`tovoigt`](@ref), [`tovoigt!`](@ref), and [`fromvoigt`](@ref) can then be used. As -illustrated by the following example, this will give the correct result. In general, -however, direct differentiation of `Tensor`s is faster (see -[Automatic Differentiation](@ref)). - -```jldoctest -julia> using Tensors, ForwardDiff - -julia> fun(X::SymmetricTensor{2}) = X; - -julia> A = rand(SymmetricTensor{2,2}); - -# Differentiation of a tensor directly (correct) -julia> tovoigt(gradient(fun, A)) -3×3 Matrix{Float64}: - 1.0 0.0 0.0 - 0.0 1.0 0.0 - 0.0 0.0 0.5 - -# Converting to Voigt format, perform differentiation, convert back (WRONG!) 
-julia> ForwardDiff.jacobian( - v -> tovoigt(fun(fromvoigt(SymmetricTensor{2,2}, v))), - tovoigt(A) - ) -3×3 Matrix{Float64}: - 1.0 0.0 0.0 - 0.0 1.0 0.0 - 0.0 0.0 1.0 - -# Converting to Mandel format, perform differentiation, convert back (correct) -julia> tovoigt( - frommandel(SymmetricTensor{4,2}, - ForwardDiff.jacobian( - v -> tomandel(fun(frommandel(SymmetricTensor{2,2}, v))), - tomandel(A) - ) - ) - ) -3×3 Matrix{Float64}: - 1.0 0.0 0.0 - 0.0 1.0 0.0 - 0.0 0.0 0.5 -``` +```@meta +DocTestSetup = quote + using Random + Random.seed!(1234) + using Tensors +end +``` + +# Other operators + +```@index +Pages = ["other_operators.md"] +``` + +## Transpose-dot + +The dot product between the transpose of a tensor with itself. Results in a symmetric tensor. + +```math +\mathbf{A} = \mathbf{B}^\text{T} \cdot \mathbf{B} \Leftrightarrow A_{ij} = B_{ki}^\text{T} B_{kj} = B_{ik} B_{kj} +``` + +```math +\mathbf{A} = \mathbf{B} \cdot \mathbf{B}^\text{T} \Leftrightarrow A_{ij} = B_{ik} B_{jk}^\text{T} = B_{ik} B_{kj} +``` + +```@docs +Tensors.tdot +Tensors.dott +``` + +## Norm + +The (2)-norm of a tensor is defined for a vector, second order tensor and fourth order tensor as + +```math +\|\mathbf{a}\| = \sqrt{\mathbf{a} \cdot \mathbf{a}} \Leftrightarrow \|a_i\| = \sqrt{a_i a_i}, +``` + +```math +\|\mathbf{A}\| = \sqrt{\mathbf{A} : \mathbf{A}} \Leftrightarrow \|A_{ij}\| = \sqrt{A_{ij} A_{ij}}, +``` + +```math +\|\mathsf{A}\| = \sqrt{\mathsf{A} :: \mathsf{A}} \Leftrightarrow \|A_{ijkl}\| = \sqrt{A_{ijkl} A_{ijkl}}. +``` + +```@docs +Tensors.norm +``` + +## Trace + +The trace for a second order tensor is defined as the sum of the diagonal elements. This can be written as + +```math +\text{tr}(\mathbf{A}) = \mathbf{I} : \mathbf{A} \Leftrightarrow \text{tr}(A_{ij}) = A_{ii}. +``` + +```@docs +Tensors.tr +``` + +## Determinant + +Determinant for a second order tensor. 
+ +```@docs +Tensors.det +``` + +## Inverse + +Inverse of a second order tensor such that + +```math +\mathbf{A}^{-1} \cdot \mathbf{A} = \mathbf{I} +``` + +where ``\mathbf{I}`` is the second order identitiy tensor. + +```@docs +Tensors.inv +``` + +## Transpose + +Transpose of tensors is defined by changing the order of the tensor's "legs". The transpose of a vector/symmetric tensor is the vector/tensor itself. The transpose of a second order tensor can be written as: + +```math +A_{ij}^\text{T} = A_{ji} +``` + +and for a fourth order tensor the minor transpose can be written as + +```math +A_{ijkl}^\text{t} = A_{jilk} +``` + +and the major transpose as + +```math +A_{ijkl}^\text{T} = A_{klij}. +``` + +```@docs +Tensors.transpose +Tensors.minortranspose +Tensors.majortranspose +``` + +## Symmetric + +The symmetric part of a second order tensor is defined by: + +```math +\mathbf{A}^\text{sym} = \frac{1}{2}(\mathbf{A} + \mathbf{A}^\text{T}) \Leftrightarrow A_{ij}^\text{sym} = \frac{1}{2}(A_{ij} + A_{ji}), +``` + +The major symmetric part of a fourth order tensor is defined by +```math +\mathsf{A}^\text{majsym} = \frac{1}{2}(\mathsf{A} + \mathsf{A}^\text{T}) \Leftrightarrow A_{ijkl}^\text{majsym} = \frac{1}{2}(A_{ijkl} + A_{klij}). +``` + +The minor symmetric part of a fourth order tensor is defined by +```math +A_{ijkl}^\text{minsym} = \frac{1}{4}(A_{ijkl} + A_{ijlk} + A_{jikl} + A_{jilk}). +``` + +```@docs +Tensors.symmetric +Tensors.minorsymmetric +Tensors.majorsymmetric +``` + +## Skew symmetric + +The skew symmetric part of a second order tensor is defined by + +```math +\mathbf{A}^\text{skw} = \frac{1}{2}(\mathbf{A} - \mathbf{A}^\text{T}) \Leftrightarrow A^\text{skw}_{ij} = \frac{1}{2}(A_{ij} - A_{ji}). +``` + +The skew symmetric part of a symmetric tensor is zero. 
+ +```@docs +Tensors.skew +``` + +## Deviatoric tensor + +The deviatoric part of a second order tensor is defined by + +```math +\mathbf{A}^\text{dev} = \mathbf{A} - \frac{1}{3} \mathrm{tr}[\mathbf{A}] \mathbf{I} \Leftrightarrow A_{ij}^\text{dev} = A_{ij} - \frac{1}{3}A_{kk}\delta_{ij}. +``` + +```@docs +Tensors.dev +``` + +## Volumetric tensor + +The volumetric part of a second order tensor is defined by + +```math +\mathbf{A}^\text{vol} = \frac{1}{3} \mathrm{tr}[\mathbf{A}] \mathbf{I} \Leftrightarrow A_{ij}^\text{vol} = \frac{1}{3}A_{kk}\delta_{ij}. +``` + +```@docs +Tensors.vol +``` + +## Cross product + +The cross product between two vectors is defined as + +```math +\mathbf{a} = \mathbf{b} \times \mathbf{c} \Leftrightarrow a_i = \epsilon_{ijk} b_j c_k +``` + +```@docs +Tensors.cross +``` + +## Eigenvalues and eigenvectors + +The eigenvalues and eigenvectors of a (symmetric) second order tensor, ``\mathbf{A}`` can be solved from the eigenvalue problem + +```math +\mathbf{A} \cdot \mathbf{v}_i = \lambda_i \mathbf{v}_i \qquad i = 1, \dots, \text{dim} +``` + +where ``\lambda_i`` are the eigenvalues and ``\mathbf{v}_i`` are the corresponding eigenvectors. +For a symmetric fourth order tensor, ``\mathsf{A}`` the second order eigentensors and eigenvalues +can be solved from + +```math +\mathsf{A} : \mathbf{V}_i = \lambda_i \mathbf{V}_i \qquad i = 1, \dots, \text{dim} +``` + +where ``\lambda_i`` are the eigenvalues and ``\mathbf{V}_i`` the corresponding eigentensors. + +```@docs +Tensors.eigen +Tensors.eigvals +Tensors.eigvecs +``` + +## Tensor square root + +Square root of a symmetric positive definite second order tensor ``S``, +defined such that + +```math +\sqrt{\mathbf{S}} \cdot \sqrt{\mathbf{S}} = S. 
+``` + +```@docs +Tensors.sqrt +``` + +## Rotations + +```@docs +Tensors.rotate +Tensors.rotation_tensor +``` + +## Special operations + +For computing a special dot product between two vectors ``\mathbf{a}`` and ``\mathbf{b}`` with a fourth order symmetric tensor ``\mathbf{C}`` such that ``a_k C_{ikjl} b_l`` there is `dotdot(a, C, b)`. This function is useful because it is the expression for the tangent matrix in continuum mechanics when the displacements are approximated by scalar shape functions. + +```@docs +Tensors.dotdot +``` + +## Voigt format + +For some operations it is convenient to easily switch to the so called "Voigt"-format. +For example when solving a local problem in a plasticity model. To simplify the conversion +between tensors and Voigt format, see [`tovoigt`](@ref), [`tovoigt!`](@ref) and +[`fromvoigt`](@ref) documented below. Care must be exercised when combined with +differentiation, see [Differentiation of Voigt format](@ref) further down. + +```@docs +Tensors.tovoigt +Tensors.tovoigt! +Tensors.fromvoigt +``` + +### Differentiation of Voigt format + +Differentiating with a Voigt representation of a symmetric tensor may lead to incorrect +results when converted back to tensors. +The `tomandel`, `tomandel!`, and `frommandel` versions of +[`tovoigt`](@ref), [`tovoigt!`](@ref), and [`fromvoigt`](@ref) can then be used. As +illustrated by the following example, this will give the correct result. In general, +however, direct differentiation of `Tensor`s is faster (see +[Automatic Differentiation](@ref)). + +```jldoctest +julia> using Tensors, ForwardDiff + +julia> fun(X::SymmetricTensor{2}) = X; + +julia> A = rand(SymmetricTensor{2,2}); + +# Differentiation of a tensor directly (correct) +julia> tovoigt(gradient(fun, A)) +3×3 Matrix{Float64}: + 1.0 0.0 0.0 + 0.0 1.0 0.0 + 0.0 0.0 0.5 + +# Converting to Voigt format, perform differentiation, convert back (WRONG!) 
+julia> ForwardDiff.jacobian( + v -> tovoigt(fun(fromvoigt(SymmetricTensor{2,2}, v))), + tovoigt(A) + ) +3×3 Matrix{Float64}: + 1.0 0.0 0.0 + 0.0 1.0 0.0 + 0.0 0.0 1.0 + +# Converting to Mandel format, perform differentiation, convert back (correct) +julia> tovoigt( + frommandel(SymmetricTensor{4,2}, + ForwardDiff.jacobian( + v -> tomandel(fun(frommandel(SymmetricTensor{2,2}, v))), + tomandel(A) + ) + ) + ) +3×3 Matrix{Float64}: + 1.0 0.0 0.0 + 0.0 1.0 0.0 + 0.0 0.0 0.5 +``` diff --git a/docs/src/man/storing_tensors.md b/docs/src/man/storing_tensors.md index 2c4dd040..731d04e3 100644 --- a/docs/src/man/storing_tensors.md +++ b/docs/src/man/storing_tensors.md @@ -1,21 +1,21 @@ -# Storing tensors - -Even though a user mostly deals with the `Tensor{order, dim, T}` parameters, the full parameter list for a tensor is actually `Tensor{order, dim, T, N}` where `N` is the number of independent elements in the tensor. The reason for this is that the internal storage for tensors is a `NTuple{N, T}`. In order to get good performance when storing tensors in other types it is important that the container type is also parametrized on `N`. For example, when storing one symmetric second order tensor and one unsymmetric tensor, this is the preferred way: - -```julia -struct Container{dim, T, N, M} - sym_tens::SymmetricTensor{2, dim, T, N} - tens::Tensor{2, dim, T, M} -end -``` - -Leaving out the `M` and `N` would lead to bad performance. - -!!! tip - The number of independent elements `N` are already included in the `typealias` `Vec` so they can be stored with e.g. - ```julia - struct VecContainer{dim, T} - vec::Vec{dim, T} - end - ``` - without causing bad performance. +# Storing tensors + +Even though a user mostly deals with the `Tensor{order, dim, T}` parameters, the full parameter list for a tensor is actually `Tensor{order, dim, T, N}` where `N` is the number of independent elements in the tensor. 
The reason for this is that the internal storage for tensors is a `NTuple{N, T}`. In order to get good performance when storing tensors in other types it is important that the container type is also parametrized on `N`. For example, when storing one symmetric second order tensor and one unsymmetric tensor, this is the preferred way: + +```julia +struct Container{dim, T, N, M} + sym_tens::SymmetricTensor{2, dim, T, N} + tens::Tensor{2, dim, T, M} +end +``` + +Leaving out the `M` and `N` would lead to bad performance. + +!!! tip + The number of independent elements `N` are already included in the `typealias` `Vec` so they can be stored with e.g. + ```julia + struct VecContainer{dim, T} + vec::Vec{dim, T} + end + ``` + without causing bad performance. diff --git a/src/Tensors.jl b/src/Tensors.jl index 4cd677b2..ed50d271 100644 --- a/src/Tensors.jl +++ b/src/Tensors.jl @@ -1,180 +1,181 @@ -module Tensors - -import Base.@pure - -import Statistics -using Statistics: mean -using LinearAlgebra -using StaticArrays -# re-exports from LinearAlgebra -export ⋅, ×, dot, diagm, tr, det, norm, eigvals, eigvecs, eigen -# re-exports from Statistics -export mean - -export AbstractTensor, SymmetricTensor, Tensor, Vec, FourthOrderTensor, SecondOrderTensor - -export otimes, ⊗, ⊡, dcontract, dev, vol, symmetric, skew, minorsymmetric, majorsymmetric -export otimesu, otimesl -export minortranspose, majortranspose, isminorsymmetric, ismajorsymmetric -export tdot, dott, dotdot -export hessian, gradient, curl, divergence, laplace -export @implement_gradient -export basevec, eᵢ -export rotate, rotation_tensor -export tovoigt, tovoigt!, fromvoigt, tomandel, tomandel!, frommandel -######### -# Types # -######### -abstract type AbstractTensor{order, dim, T <: Number} <: AbstractArray{T, order} end - -""" - SymmetricTensor{order,dim,T<:Number} - -Symmetric tensor type supported for `order ∈ (2,4)` and `dim ∈ (1,2,3)`. 
-`SymmetricTensor{4}` is a minor symmetric tensor, such that -`A[i,j,k,l] == A[j,i,k,l]` and `A[i,j,k,l] == A[i,j,l,k]`. - -# Examples -```jldoctest -julia> SymmetricTensor{2,2,Float64}((1.0, 2.0, 3.0)) -2×2 SymmetricTensor{2, 2, Float64, 3}: - 1.0 2.0 - 2.0 3.0 -``` -""" -struct SymmetricTensor{order, dim, T, M} <: AbstractTensor{order, dim, T} - data::NTuple{M, T} - SymmetricTensor{order, dim, T, M}(data::NTuple) where {order, dim, T, M} = new{order, dim, T, M}(data) -end - -""" - Tensor{order,dim,T<:Number} - -Tensor type supported for `order ∈ (1,2,4)` and `dim ∈ (1,2,3)`. - -# Examples -```jldoctest -julia> Tensor{1,3,Float64}((1.0, 2.0, 3.0)) -3-element Vec{3, Float64}: - 1.0 - 2.0 - 3.0 -``` -""" -struct Tensor{order, dim, T, M} <: AbstractTensor{order, dim, T} - data::NTuple{M, T} - Tensor{order, dim, T, M}(data::NTuple) where {order, dim, T, M} = new{order, dim, T, M}(data) -end - -############### -# Typealiases # -############### -const Vec{dim, T, M} = Tensor{1, dim, T, dim} - -const AllTensors{dim, T} = Union{SymmetricTensor{2, dim, T}, Tensor{2, dim, T}, - SymmetricTensor{4, dim, T}, Tensor{4, dim, T}, - Vec{dim, T}, Tensor{3, dim, T}} - - -const SecondOrderTensor{dim, T} = Union{SymmetricTensor{2, dim, T}, Tensor{2, dim, T}} -const FourthOrderTensor{dim, T} = Union{SymmetricTensor{4, dim, T}, Tensor{4, dim, T}} -const SymmetricTensors{dim, T} = Union{SymmetricTensor{2, dim, T}, SymmetricTensor{4, dim, T}} -const NonSymmetricTensors{dim, T} = Union{Tensor{2, dim, T}, Tensor{4, dim, T}, Vec{dim, T}} - - -############################## -# Utility/Accessor Functions # -############################## -get_data(t::AbstractTensor) = t.data - -@pure n_components(::Type{SymmetricTensor{2, dim}}) where {dim} = dim*dim - div((dim-1)*dim, 2) -@pure function n_components(::Type{SymmetricTensor{4, dim}}) where {dim} - n = n_components(SymmetricTensor{2, dim}) - return n*n -end -@pure n_components(::Type{Tensor{order, dim}}) where {order, dim} = dim^order - -@pure 
get_type(::Type{Type{X}}) where {X} = X - -@pure get_base(::Type{<:Tensor{order, dim}}) where {order, dim} = Tensor{order, dim} -@pure get_base(::Type{<:SymmetricTensor{order, dim}}) where {order, dim} = SymmetricTensor{order, dim} - -@pure Base.eltype(::Type{Tensor{order, dim, T, M}}) where {order, dim, T, M} = T -@pure Base.eltype(::Type{Tensor{order, dim, T}}) where {order, dim, T} = T -@pure Base.eltype(::Type{Tensor{order, dim}}) where {order, dim} = Any -@pure Base.eltype(::Type{SymmetricTensor{order, dim, T, M}}) where {order, dim, T, M} = T -@pure Base.eltype(::Type{SymmetricTensor{order, dim, T}}) where {order, dim, T} = T -@pure Base.eltype(::Type{SymmetricTensor{order, dim}}) where {order, dim} = Any - - -############################ -# Abstract Array interface # -############################ -Base.IndexStyle(::Type{<:SymmetricTensor}) = IndexCartesian() -Base.IndexStyle(::Type{<:Tensor}) = IndexLinear() - -######## -# Size # -######## -Base.size(::Vec{dim}) where {dim} = (dim,) -Base.size(::SecondOrderTensor{dim}) where {dim} = (dim, dim) -Base.size(::Tensor{3,dim}) where {dim} = (dim, dim, dim) -Base.size(::FourthOrderTensor{dim}) where {dim} = (dim, dim, dim, dim) - -# Also define length for the type itself -Base.length(::Type{Tensor{order, dim, T, M}}) where {order, dim, T, M} = M - -######################### -# Internal constructors # -######################### -for (TensorType, orders) in ((SymmetricTensor, (2,4)), (Tensor, (2,3,4))) - for order in orders, dim in (1, 2, 3) - N = n_components(TensorType{order, dim}) - @eval begin - @inline $TensorType{$order, $dim}(t::NTuple{$N, T}) where {T} = $TensorType{$order, $dim, T, $N}(t) - @inline $TensorType{$order, $dim, T1}(t::NTuple{$N, T2}) where {T1, T2} = $TensorType{$order, $dim, T1, $N}(t) - end - if N > 1 # To avoid overwriting ::Tuple{Any} - # Heterogeneous tuple - @eval @inline $TensorType{$order, $dim}(t::Tuple{Vararg{Any,$N}}) = $TensorType{$order, $dim}(promote(t...)) - end - end - if 
TensorType == Tensor - for dim in (1, 2, 3) - @eval @inline Tensor{1, $dim}(t::NTuple{$dim, T}) where {T} = Tensor{1, $dim, T, $dim}(t) - if dim > 1 # To avoid overwriting ::Tuple{Any} - # Heterogeneous tuple - @eval @inline Tensor{1, $dim}(t::Tuple{Vararg{Any,$dim}}) = Tensor{1, $dim}(promote(t...)) - end - end - end -end -# Special for Vec -@inline Vec{dim}(data) where {dim} = Tensor{1, dim}(data) -@inline Vec(data::NTuple{N}) where {N} = Vec{N}(data) -@inline Vec(data::Vararg{T,N}) where {T, N} = Vec{N,T}(data) - -# General fallbacks -@inline Tensor{order, dim, T}(data::Union{AbstractArray, Tuple, Function}) where {order, dim, T} = convert(Tensor{order, dim, T}, Tensor{order, dim}(data)) -@inline SymmetricTensor{order, dim, T}(data::Union{AbstractArray, Tuple, Function}) where {order, dim, T} = convert(SymmetricTensor{order, dim, T}, SymmetricTensor{order, dim}(data)) -@inline Tensor{order, dim, T, M}(data::Union{AbstractArray, Tuple, Function}) where {order, dim, T, M} = Tensor{order, dim, T}(data) -@inline SymmetricTensor{order, dim, T, M}(data::Union{AbstractArray, Tuple, Function}) where {order, dim, T, M} = SymmetricTensor{order, dim, T}(data) - -include("indexing.jl") -include("utilities.jl") -include("tensor_ops_errors.jl") -include("automatic_differentiation.jl") -include("promotion_conversion.jl") -include("constructors.jl") -include("basic_operations.jl") -include("tensor_products.jl") -include("transpose.jl") -include("symmetric.jl") -include("math_ops.jl") -include("eigen.jl") -include("special_ops.jl") -include("simd.jl") -include("voigt.jl") -include("precompile.jl") - -end # module +module Tensors + +import Base.@pure + +import Statistics +using Statistics: mean +using LinearAlgebra +using StaticArrays +# re-exports from LinearAlgebra +export ⋅, ×, dot, diagm, tr, det, norm, eigvals, eigvecs, eigen +# re-exports from Statistics +export mean + +export AbstractTensor, SymmetricTensor, Tensor, Vec, FourthOrderTensor, SecondOrderTensor + +export 
otimes, ⊗, ⊡, dcontract, dev, vol, symmetric, skew, minorsymmetric, majorsymmetric +export otimesu, otimesl +export minortranspose, majortranspose, isminorsymmetric, ismajorsymmetric +export tdot, dott, dotdot +export hessian, gradient, curl, divergence, laplace +export @implement_gradient +export basevec, eᵢ +export rotate, rotation_tensor +export tovoigt, tovoigt!, fromvoigt, tomandel, tomandel!, frommandel +######### +# Types # +######### +abstract type AbstractTensor{order, dim, T <: Number} <: AbstractArray{T, order} end + +""" + SymmetricTensor{order,dim,T<:Number} + +Symmetric tensor type supported for `order ∈ (2,4)` and `dim ∈ (1,2,3)`. +`SymmetricTensor{4}` is a minor symmetric tensor, such that +`A[i,j,k,l] == A[j,i,k,l]` and `A[i,j,k,l] == A[i,j,l,k]`. + +# Examples +```jldoctest +julia> SymmetricTensor{2,2,Float64}((1.0, 2.0, 3.0)) +2×2 SymmetricTensor{2, 2, Float64, 3}: + 1.0 2.0 + 2.0 3.0 +``` +""" +struct SymmetricTensor{order, dim, T, M} <: AbstractTensor{order, dim, T} + data::NTuple{M, T} + SymmetricTensor{order, dim, T, M}(data::NTuple) where {order, dim, T, M} = new{order, dim, T, M}(data) +end + +""" + Tensor{order,dim,T<:Number} + +Tensor type supported for `order ∈ (1,2,4)` and `dim ∈ (1,2,3)`. 
+ +# Examples +```jldoctest +julia> Tensor{1,3,Float64}((1.0, 2.0, 3.0)) +3-element Vec{3, Float64}: + 1.0 + 2.0 + 3.0 +``` +""" +struct Tensor{order, dim, T, M} <: AbstractTensor{order, dim, T} + data::NTuple{M, T} + Tensor{order, dim, T, M}(data::NTuple) where {order, dim, T, M} = new{order, dim, T, M}(data) +end + +############### +# Typealiases # +############### +const Vec{dim, T, M} = Tensor{1, dim, T, dim} + +const AllTensors{dim, T} = Union{SymmetricTensor{2, dim, T}, Tensor{2, dim, T}, + SymmetricTensor{4, dim, T}, Tensor{4, dim, T}, + Vec{dim, T}, Tensor{3, dim, T}} + + +const SecondOrderTensor{dim, T} = Union{SymmetricTensor{2, dim, T}, Tensor{2, dim, T}} +const FourthOrderTensor{dim, T} = Union{SymmetricTensor{4, dim, T}, Tensor{4, dim, T}} +const SymmetricTensors{dim, T} = Union{SymmetricTensor{2, dim, T}, SymmetricTensor{4, dim, T}} +const NonSymmetricTensors{dim, T} = Union{Tensor{2, dim, T}, Tensor{4, dim, T}, Vec{dim, T}} + + +############################## +# Utility/Accessor Functions # +############################## +get_data(t::AbstractTensor) = t.data + +@pure n_components(::Type{SymmetricTensor{2, dim}}) where {dim} = dim*dim - div((dim-1)*dim, 2) +@pure function n_components(::Type{SymmetricTensor{4, dim}}) where {dim} + n = n_components(SymmetricTensor{2, dim}) + return n*n +end +@pure n_components(::Type{Tensor{order, dim}}) where {order, dim} = dim^order + +@pure get_type(::Type{Type{X}}) where {X} = X + +@pure get_base(::Type{<:Tensor{order, dim}}) where {order, dim} = Tensor{order, dim} +@pure get_base(::Type{<:SymmetricTensor{order, dim}}) where {order, dim} = SymmetricTensor{order, dim} + +@pure Base.eltype(::Type{Tensor{order, dim, T, M}}) where {order, dim, T, M} = T +@pure Base.eltype(::Type{Tensor{order, dim, T}}) where {order, dim, T} = T +@pure Base.eltype(::Type{Tensor{order, dim}}) where {order, dim} = Any +@pure Base.eltype(::Type{SymmetricTensor{order, dim, T, M}}) where {order, dim, T, M} = T +@pure 
Base.eltype(::Type{SymmetricTensor{order, dim, T}}) where {order, dim, T} = T +@pure Base.eltype(::Type{SymmetricTensor{order, dim}}) where {order, dim} = Any + + +############################ +# Abstract Array interface # +############################ +Base.IndexStyle(::Type{<:SymmetricTensor}) = IndexCartesian() +Base.IndexStyle(::Type{<:Tensor}) = IndexLinear() + +######## +# Size # +######## +Base.size(::Vec{dim}) where {dim} = (dim,) +Base.size(::SecondOrderTensor{dim}) where {dim} = (dim, dim) +Base.size(::Tensor{3,dim}) where {dim} = (dim, dim, dim) +Base.size(::FourthOrderTensor{dim}) where {dim} = (dim, dim, dim, dim) + +# Also define length for the type itself +Base.length(::Type{Tensor{order, dim, T, M}}) where {order, dim, T, M} = M + +######################### +# Internal constructors # +######################### +for (TensorType, orders) in ((SymmetricTensor, (2,4)), (Tensor, (2,3,4))) + for order in orders, dim in (1, 2, 3) + N = n_components(TensorType{order, dim}) + @eval begin + @inline $TensorType{$order, $dim}(t::NTuple{$N, T}) where {T} = $TensorType{$order, $dim, T, $N}(t) + @inline $TensorType{$order, $dim, T1}(t::NTuple{$N, T2}) where {T1, T2} = $TensorType{$order, $dim, T1, $N}(t) + end + if N > 1 # To avoid overwriting ::Tuple{Any} + # Heterogeneous tuple + @eval @inline $TensorType{$order, $dim}(t::Tuple{Vararg{Any,$N}}) = $TensorType{$order, $dim}(promote(t...)) + end + end + if TensorType == Tensor + for dim in (1, 2, 3) + @eval @inline Tensor{1, $dim}(t::NTuple{$dim, T}) where {T} = Tensor{1, $dim, T, $dim}(t) + if dim > 1 # To avoid overwriting ::Tuple{Any} + # Heterogeneous tuple + @eval @inline Tensor{1, $dim}(t::Tuple{Vararg{Any,$dim}}) = Tensor{1, $dim}(promote(t...)) + end + end + end +end +# Special for Vec +@inline Vec{dim}(data) where {dim} = Tensor{1, dim}(data) +@inline Vec(data::NTuple{N}) where {N} = Vec{N}(data) +@inline Vec(data::Vararg{T,N}) where {T, N} = Vec{N,T}(data) + +# General fallbacks +@inline Tensor{order, 
dim, T}(data::Union{AbstractArray, Tuple, Function}) where {order, dim, T} = convert(Tensor{order, dim, T}, Tensor{order, dim}(data)) +@inline SymmetricTensor{order, dim, T}(data::Union{AbstractArray, Tuple, Function}) where {order, dim, T} = convert(SymmetricTensor{order, dim, T}, SymmetricTensor{order, dim}(data)) +@inline Tensor{order, dim, T, M}(data::Union{AbstractArray, Tuple, Function}) where {order, dim, T, M} = Tensor{order, dim, T}(data) +@inline SymmetricTensor{order, dim, T, M}(data::Union{AbstractArray, Tuple, Function}) where {order, dim, T, M} = SymmetricTensor{order, dim, T}(data) + +include("indexing.jl") +include("utilities.jl") +include("tensor_product_macro.jl") +include("tensor_ops_errors.jl") +include("automatic_differentiation.jl") +include("promotion_conversion.jl") +include("constructors.jl") +include("basic_operations.jl") +include("tensor_products.jl") +include("transpose.jl") +include("symmetric.jl") +include("math_ops.jl") +include("eigen.jl") +include("special_ops.jl") +include("simd.jl") +include("voigt.jl") +include("precompile.jl") + +end # module diff --git a/src/automatic_differentiation.jl b/src/automatic_differentiation.jl index d0aaa877..3d7cfdcb 100644 --- a/src/automatic_differentiation.jl +++ b/src/automatic_differentiation.jl @@ -1,623 +1,623 @@ -import ForwardDiff: Dual, partials, value, Tag - -@static if isdefined(LinearAlgebra, :gradient) - import LinearAlgebra.gradient -end - -###################### -# Extraction methods # -###################### - -# Extractions are supposed to unpack the value and the partials -# The partials should be put into a tensor of higher order. -# The extraction methods need to know the input type to the function -# that generated the result. The reason for this is that there is no -# difference in the output type (except the number of partials) for -# norm(v) and det(T) where v is a vector and T is a second order tensor. 
- -#################### -# Value extraction # -#################### - -# Scalar output -> Scalar value -""" - function _extract_value(v::ForwardDiff.Dual) - function _extract_value(v::AbstractTensor{<:Any,<:Any,<:Dual}) - -Extract the non-dual part of a tensor with dual entries. This -function is useful when inserting analytical derivatives using -the [`_insert_gradient`](@ref) function -""" -@inline function _extract_value(v::Dual) - return value(v) -end -# AbstractTensor output -> AbstractTensor gradient -@generated function _extract_value(v::AbstractTensor{<:Any,<:Any,<:Dual}) - TensorType = get_base(v) - ex = Expr(:tuple) - for i in 1:n_components(TensorType) - # Can use linear indexing even for SymmetricTensor - # when indexing the underlying tuple - push!(ex.args, :(value(get_data(v)[$i]))) - end - quote - $(Expr(:meta, :inline)) - @inbounds return $TensorType($ex) - end -end - -####################### -# Gradient extraction # -####################### - -# Scalar output, Scalar input -> Scalar gradient -@inline function _extract_gradient(v::Dual, ::Number) - return @inbounds partials(v)[1] -end -# Vec, Tensor{2/4}, SymmetricTensor{2/4} output, Scalar input -> Vec, Tensor{2/4}, SymmetricTensor{2/4} gradient -@generated function _extract_gradient(v::AbstractTensor{<:Any,<:Any,<:Dual}, ::Number) - TensorType = get_base(v) - ex = Expr(:tuple) - for i in 1:n_components(TensorType) - # Can use linear indexing even for SymmetricTensor - # when indexing the underlying tuple - push!(ex.args, :(partials(get_data(v)[$i])[1])) - end - quote - $(Expr(:meta, :inline)) - @inbounds return $TensorType($ex) - end -end -# Scalar output, Vec input -> Vec gradient -@inline function _extract_gradient(v::Dual, ::Vec{N}) where {N} - return Vec{N}(partials(v).values) -end -# Scalar output, SecondOrderTensor input -> SecondOrderTensor gradient -@inline function _extract_gradient(v::Dual, ::SymmetricTensor{2, dim}) where {dim} - return SymmetricTensor{2, dim}(partials(v).values) -end 
-@inline function _extract_gradient(v::Dual, ::Tensor{2, dim}) where {dim} - return Tensor{2, dim}(partials(v).values) -end - -# Vec output, Vec input -> Tensor{2} gradient -@inline function _extract_gradient(v::Vec{1, <: Dual}, ::Vec{1}) - @inbounds begin - p1 = partials(v[1]) - ∇f = Tensor{2, 1}((p1[1],)) - end - return ∇f -end -# Vec output, Vec input -> Tensor{2} gradient -@inline function _extract_gradient(v::Vec{2, <: Dual}, ::Vec{2}) - @inbounds begin - p1, p2 = partials(v[1]), partials(v[2]) - ∇f = Tensor{2, 2}((p1[1], p2[1], p1[2], p2[2])) - end - return ∇f -end -# Vec output, Vec input -> Tensor{2} gradient -@inline function _extract_gradient(v::Vec{3, <: Dual}, ::Vec{3}) - @inbounds begin - p1, p2, p3 = partials(v[1]), partials(v[2]), partials(v[3]) - ∇f = Tensor{2, 3}((p1[1], p2[1], p3[1], p1[2], p2[2], p3[2], p1[3], p2[3], p3[3])) - end - return ∇f -end - -# Tensor{2} output, Tensor{2} input -> Tensor{4} gradient -@inline function _extract_gradient(v::Tensor{2, 1, <: Dual}, ::Tensor{2, 1}) - @inbounds begin - p1 = partials(v[1,1]) - ∇f = Tensor{4, 1}((p1[1],)) - end - return ∇f -end -# SymmetricTensor{2} output, SymmetricTensor{2} input -> SymmetricTensor{4} gradient -@inline function _extract_gradient(v::SymmetricTensor{2, 1, <: Dual}, ::SymmetricTensor{2, 1}) - @inbounds begin - p1 = partials(v[1,1]) - ∇f = SymmetricTensor{4, 1}((p1[1],)) - end - return ∇f -end -# Tensor{2} output, Vec input -> Tensor{3} gradient -@inline function _extract_gradient(v::Tensor{2, 1, <: Dual}, ::Vec{1}) - @inbounds begin - p1 = partials(v[1,1]) - ∇f = Tensor{3, 1}((p1[1],)) - end - return ∇f -end -# Tensor{2} output, Tensor{2} input -> Tensor{4} gradient -@inline function _extract_gradient(v::Tensor{2, 2, <: Dual}, ::Tensor{2, 2}) - @inbounds begin - p1, p2, p3, p4 = partials(v[1,1]), partials(v[2,1]), partials(v[1,2]), partials(v[2,2]) - ∇f = Tensor{4, 2}((p1[1], p2[1], p3[1], p4[1], - p1[2], p2[2], p3[2], p4[2], - p1[3], p2[3], p3[3], p4[3], - p1[4], p2[4], p3[4], 
p4[4])) - end - return ∇f -end -# SymmetricTensor{2} output, SymmetricTensor{2} input -> SymmetricTensor{4} gradient -@inline function _extract_gradient(v::SymmetricTensor{2, 2, <: Dual}, ::SymmetricTensor{2, 2}) - @inbounds begin - p1, p2, p3 = partials(v[1,1]), partials(v[2,1]), partials(v[2,2]) - ∇f = SymmetricTensor{4, 2}((p1[1], p2[1], p3[1], - p1[2], p2[2], p3[2], - p1[3], p2[3], p3[3])) - end - return ∇f -end -# Tensor{2} output, Vec input -> Tensor{3} gradient -@inline function _extract_gradient(v::Tensor{2, 2, <: Dual}, ::Vec{2}) - @inbounds begin - p1, p2, p3, p4 = partials(v[1,1]), partials(v[2,1]), partials(v[1,2]), partials(v[2,2]) - ∇f = Tensor{3, 2}((p1[1], p2[1], p3[1], p4[1], - p1[2], p2[2], p3[2], p4[2])) - end - return ∇f -end -# Tensor{2} output, Tensor{2} input -> Tensor{4} gradient -@inline function _extract_gradient(v::Tensor{2, 3, <: Dual}, ::Tensor{2, 3}) - @inbounds begin - p1, p2, p3 = partials(v[1,1]), partials(v[2,1]), partials(v[3,1]) - p4, p5, p6 = partials(v[1,2]), partials(v[2,2]), partials(v[3,2]) - p7, p8, p9 = partials(v[1,3]), partials(v[2,3]), partials(v[3,3]) - ∇f = Tensor{4, 3}((p1[1], p2[1], p3[1], p4[1], p5[1], p6[1], p7[1], p8[1], p9[1], - p1[2], p2[2], p3[2], p4[2], p5[2], p6[2], p7[2], p8[2], p9[2], # ### # - p1[3], p2[3], p3[3], p4[3], p5[3], p6[3], p7[3], p8[3], p9[3], # # # # - p1[4], p2[4], p3[4], p4[4], p5[4], p6[4], p7[4], p8[4], p9[4], ### ### ### - p1[5], p2[5], p3[5], p4[5], p5[5], p6[5], p7[5], p8[5], p9[5], - p1[6], p2[6], p3[6], p4[6], p5[6], p6[6], p7[6], p8[6], p9[6], - p1[7], p2[7], p3[7], p4[7], p5[7], p6[7], p7[7], p8[7], p9[7], - p1[8], p2[8], p3[8], p4[8], p5[8], p6[8], p7[8], p8[8], p9[8], - p1[9], p2[9], p3[9], p4[9], p5[9], p6[9], p7[9], p8[9], p9[9])) - end - return ∇f -end -# SymmetricTensor{2} output, SymmetricTensor{2} input -> SymmetricTensor{4} gradient -@inline function _extract_gradient(v::SymmetricTensor{2, 3, <: Dual}, ::SymmetricTensor{2, 3}) - @inbounds begin - p1, p2, p3 = 
partials(v[1,1]), partials(v[2,1]), partials(v[3,1]) - p4, p5, p6 = partials(v[2,2]), partials(v[3,2]), partials(v[3,3]) - ∇f = SymmetricTensor{4, 3}((p1[1], p2[1], p3[1], p4[1], p5[1], p6[1], - p1[2], p2[2], p3[2], p4[2], p5[2], p6[2], - p1[3], p2[3], p3[3], p4[3], p5[3], p6[3], - p1[4], p2[4], p3[4], p4[4], p5[4], p6[4], - p1[5], p2[5], p3[5], p4[5], p5[5], p6[5], - p1[6], p2[6], p3[6], p4[6], p5[6], p6[6])) - end - return ∇f -end - -# Tensor{2} output, Vec input -> Tensor{3} gradient -@inline function _extract_gradient(v::Tensor{2, 3, <: Dual}, ::Vec{3}) - @inbounds begin - p1, p2, p3 = partials(v[1,1]), partials(v[2,1]), partials(v[3,1]) - p4, p5, p6 = partials(v[1,2]), partials(v[2,2]), partials(v[3,2]) - p7, p8, p9 = partials(v[1,3]), partials(v[2,3]), partials(v[3,3]) - ∇f = Tensor{3, 3}((p1[1], p2[1], p3[1], p4[1], p5[1], p6[1], p7[1], p8[1], p9[1], - p1[2], p2[2], p3[2], p4[2], p5[2], p6[2], p7[2], p8[2], p9[2], - p1[3], p2[3], p3[3], p4[3], p5[3], p6[3], p7[3], p8[3], p9[3])) - end - return ∇f -end - -# for non dual variable -@inline function _extract_value(v::Any) - return v -end -for TensorType in (Tensor, SymmetricTensor) - @eval begin - @inline function _extract_gradient(v::T, x::$TensorType{order, dim}) where {T<:Real, order, dim} - zero($TensorType{order, dim, T}) - end - @generated function _extract_gradient(v::$TensorType{order, dim, T}, ::$TensorType{order, dim}) where {T<:Real, order, dim} - RetType = $TensorType{order+order, dim, T} - return quote - $(Expr(:meta, :inline)) - zero($RetType) - end - end - end -end - -###################### -# Gradient insertion # -###################### - -# Insertions get the real value and derivative of a function, as well -# a tensor of dual values that was the initial input to that function. -# A new tensor of dual values are then created, to emulate the function -# being run with dual numbers (i.e. 
inserting the analytical gradient) -# As opposed to with gradient extraction, we don't have the original input -# (scalar or tensor) to the gradient function. But we can create this based -# on the tag created in the `gradient` function. -# Specifically, consider a function y=f(g(x)) where we want to supply the -# derivative df/dg (at g(x)). We then have "dy/dx = df/dg dg/dx" where -# the type of product is given by the type of g: -# g is 0th order: open product ^1 -# g is 1st order: single contraction -# g is 2nd order: double contraction -# -# ^1: Regular multiplication for scalars, but in case x and f -# are vectors, then it is open product. -# -# Support is given for the following function configurations -# g (input) f (output) dfdg (derivative) -# 2nd order 0th order 2nd order -# 1st order 1st order 2nd order -# 2nd order 2nd order 4th order - -# First, we define the API macro used to supply the analytical derivative -""" - @implement_gradient(f, f_dfdx) - -This macro allows specifying a function `f_dfdx` that provides an analytical -derivative of the function `f`, and is invoked when `f` is differentiated -using automatic differentiation based on `ForwardDiff.jl` -(e.g. when using `Tensors.jl`'s -[`gradient`](@ref) or [`hessian`](@ref)), or one of `ForwardDiff.jl`'s API). -The function `f_dfdx` must take -the same argument as `f` and should return both the value of `f` and -the gradient, i.e. `fval, dfdx_val = f_dfdx(x)`. 
The following combinations -of input and output types are supported: - -| `x` | `f(x)` | `dfdx` | -|:--------------------|:--------------------|:--------------------| -| `Number` | `Number` | `Number` | -| `Number` | `Vec` | `Vec` | -| `Number` | `SecondOrderTensor` | `SecondOrderTensor` | -| `Vec` | `Number` | `Vec` | -| `Vec` | `Vec` | `Tensor{2}` | -| `SecondOrderTensor` | `Number` | `SecondOrderTensor` | -| `SecondOrderTensor` | `SecondOrderTensor` | `FourthOrderTensor` | - -Note that if one tensor if of symmetric type, then all tensors must -be of symmetric type - -""" -macro implement_gradient(f, f_dfdx) - return :($(esc(f))(x :: Union{AbstractTensor{<:Any, <:Any, <:Dual}, Dual}) = _propagate_gradient($(esc(f_dfdx)), x)) -end -# which calls the general function _propagate_gradient that calls the specialized _insert_gradient method below -function _propagate_gradient(f_dfdx::Function, x::Union{AbstractTensor{<:Any, <:Any, <:Dual}, Dual}) - fval, dfdx_val = f_dfdx(_extract_value(x)) - return _insert_gradient(fval, dfdx_val, x) -end - -# Define the _insert_gradient method -""" - _insert_gradient(f::Union{Number,AbstractTensor}, dfdg::Union{Number,AbstractTensor}, g::ForwardDiff.Dual) - _insert_gradient(f::Union{Number,AbstractTensor}, dfdg::Union{Number,AbstractTensor}, g::Vec{<:Any,<:ForwardDiff.Dual}) - _insert_gradient(f::Union{Number,AbstractTensor}, dfdg::Union{Number,AbstractTensor}, g::SecondOrderTensor{<:Any,<:ForwardDiff.Dual}) - -Allows inserting an analytical gradient for use with automatic differentiation. -Consider a composed function ``h(f(g(x)))``, where you have an efficient way to -calculate ``\\partial f/\\partial g``, but want to use automatic -differentiation for the other functions. Then, you can make another definition -of ``f(g)`` to dispatch on if ``g`` is a tensor with `ForwardDiff.Dual` -entires, i.e. 
-```julia -function f(g::Tensor{2,dim,T}) where{dim, T<:ForwardDiff.Dual} - gval = _extract_value(g) # Get the non-dual tensor value - fval = f(gval) # Calculate function value - dfdg = dfdg_analytical(fval, gval) # Calculate analytical derivative - return _insert_gradient(fval, dfdg, g) # Return the updated dual tensor -end -``` - -""" -function _insert_gradient(f::Union{Number,AbstractTensor}, dfdg::Union{Number,AbstractTensor}, g::Dual{Tg}) where{Tg} - dgdx = _extract_gradient(g, _get_original_gradient_input(g)) - dfdx = dfdg ⊗ dgdx - return _insert_full_gradient(f, dfdx, Tg()) -end - -function _insert_gradient(f::Union{Number,AbstractTensor}, dfdg::Union{Number,AbstractTensor}, g::Vec{<:Any, <:Dual{Tg}}) where{Tg} - dgdx = _extract_gradient(g, _get_original_gradient_input(g)) - dfdx = dfdg ⋅ dgdx - return _insert_full_gradient(f, dfdx, Tg()) -end - -function _insert_gradient(f::Union{Number,AbstractTensor}, dfdg::Union{Number,AbstractTensor}, g::SecondOrderTensor{<:Any,<:Dual{Tg}}) where{Tg} - dgdx = _extract_gradient(g, _get_original_gradient_input(g)) - dfdx = dfdg ⊡ dgdx - return _insert_full_gradient(f, dfdx, Tg()) -end - -# Define helper function to figure out original input to gradient function -_get_original_gradient_input(::Dual{Tag{Tf,Tv}}) where{Tf,Tv} = zero(Tv) -_get_original_gradient_input(::AbstractTensor{<:Any,<:Any,<:Dual{Tag{Tf,Tv}}}) where{Tf,Tv} = zero(Tv) - -# Define helper function to insert_the_full_gradient calculated in _insert_gradient -_insert_full_gradient(f::Number, dfdx::Number, ::Tg) where{Tg} = Dual{Tg}(f, dfdx) -_insert_full_gradient(f::Number, dfdx::AbstractTensor, ::Tg) where{Tg} = Dual{Tg}(f, get_data(dfdx)) - -function _insert_full_gradient(f::TT, dfdx::TT, ::Tg) where{TT<:AbstractTensor,Tg} - fdata = get_data(f) - diffdata = get_data(dfdx) - TTb = get_base(TT) - @inbounds y = TTb(ntuple(i -> Dual{Tg}(fdata[i], diffdata[i]), length(fdata))) - return y -end - -function _insert_full_gradient(f::Vec{dim}, dfdx::Tensor{2,dim}, 
::Tg) where{dim, Tg} - fdata = get_data(f) - diffdata = get_data(dfdx) - @inbounds y = Vec{dim}(i -> Dual{Tg}(fdata[i], ntuple(j->diffdata[i+dim*(j-1)], dim))) - return y -end - -function _insert_full_gradient(f::Tensor{2,dim,<:Any,N}, dfdx::Tensor{4,dim}, ::Tg) where{dim, N, Tg} - fdata = get_data(f) - diffdata = get_data(dfdx) - @inbounds y = Tensor{2,dim}(ntuple(i->Dual{Tg}(fdata[i], ntuple(j->diffdata[i+N*(j-1)],N)), N)) - return y -end -function _insert_full_gradient(f::SymmetricTensor{2,dim,<:Any,N}, dfdx::SymmetricTensor{4,dim}, ::Tg) where{dim, N, Tg} - fdata = get_data(f) - diffdata = get_data(dfdx) - @inbounds y = SymmetricTensor{2,dim}(ntuple(i->Dual{Tg}(fdata[i], ntuple(j->diffdata[i+N*(j-1)],N)), N)) - return y -end - - -################## -# Load functions # -################## - -# Loaders are supposed to take a tensor of real values and convert it -# into a tensor of dual values where the seeds are correctly defined. - -@inline function _load(v::Number, ::Tg) where Tg - return Dual{Tg}(v, one(v)) -end - -@inline function _load(v::Vec{1, T}, ::Tg) where {T, Tg} - @inbounds v_dual = Vec{1}((Dual{Tg}(v[1], one(T)),)) - return v_dual -end - -@inline function _load(v::Vec{2, T}, ::Tg) where {T, Tg} - o = one(T) - z = zero(T) - @inbounds v_dual = Vec{2}((Dual{Tg}(v[1], o, z), - Dual{Tg}(v[2], z, o))) - return v_dual -end - -@inline function _load(v::Vec{3, T}, ::Tg) where {T, Tg} - o = one(T) - z = zero(T) - @inbounds v_dual = Vec{3}((Dual{Tg}(v[1], o, z, z), - Dual{Tg}(v[2], z, o, z), - Dual{Tg}(v[3], z, z, o))) - return v_dual -end - -# Second order tensors -@inline function _load(v::Tensor{2, 1, T}, ::Tg) where {T, Tg} - @inbounds v_dual = Tensor{2, 1}((Dual{Tg}(get_data(v)[1], one(T)),)) - return v_dual -end - -@inline function _load(v::SymmetricTensor{2, 1, T}, ::Tg) where {T, Tg} - @inbounds v_dual = SymmetricTensor{2, 1}((Dual{Tg}(get_data(v)[1], one(T)),)) - return v_dual -end - -@inline function _load(v::Tensor{2, 2, T}, ::Tg) where {T, Tg} - 
data = get_data(v) - o = one(T) - z = zero(T) - @inbounds v_dual = Tensor{2, 2}((Dual{Tg}(data[1], o, z, z, z), - Dual{Tg}(data[2], z, o, z, z), - Dual{Tg}(data[3], z, z, o, z), - Dual{Tg}(data[4], z, z, z, o))) - return v_dual -end - -@inline function _load(v::SymmetricTensor{2, 2, T}, ::Tg) where {T, Tg} - data = get_data(v) - o = one(T) - o2 = convert(T, 1/2) - z = zero(T) - @inbounds v_dual = SymmetricTensor{2, 2}((Dual{Tg}(data[1], o, z, z), - Dual{Tg}(data[2], z, o2, z), - Dual{Tg}(data[3], z, z, o))) - return v_dual -end - -@inline function _load(v::Tensor{2, 3, T}, ::Tg) where {T, Tg} - data = get_data(v) - o = one(T) - z = zero(T) - @inbounds v_dual = Tensor{2, 3}((Dual{Tg}(data[1], o, z, z, z, z, z, z, z, z), - Dual{Tg}(data[2], z, o, z, z, z, z, z, z, z), - Dual{Tg}(data[3], z, z, o, z, z, z, z, z, z), - Dual{Tg}(data[4], z, z, z, o, z, z, z, z, z), - Dual{Tg}(data[5], z, z, z, z, o, z, z, z, z), - Dual{Tg}(data[6], z, z, z, z, z, o, z, z, z), - Dual{Tg}(data[7], z, z, z, z, z, z, o, z, z), - Dual{Tg}(data[8], z, z, z, z, z, z, z, o, z), - Dual{Tg}(data[9], z, z, z, z, z, z, z, z, o))) - return v_dual -end - -@inline function _load(v::SymmetricTensor{2, 3, T}, ::Tg) where {T, Tg} - data = get_data(v) - o = one(T) - o2 = convert(T, 1/2) - z = zero(T) - @inbounds v_dual = SymmetricTensor{2, 3}((Dual{Tg}(data[1], o, z, z, z, z, z), - Dual{Tg}(data[2], z, o2, z, z, z, z), - Dual{Tg}(data[3], z, z, o2, z, z, z), - Dual{Tg}(data[4], z, z, z, o, z, z), - Dual{Tg}(data[5], z, z, z, z, o2, z), - Dual{Tg}(data[6], z, z, z, z, z, o))) - return v_dual -end - -""" - gradient(f::Function, v::Union{SecondOrderTensor, Vec, Number}) - gradient(f::Function, v::Union{SecondOrderTensor, Vec, Number}, :all) - -Computes the gradient of the input function. If the (pseudo)-keyword `all` -is given, the value of the function is also returned as a second output argument. 
- -# Examples -```jldoctest -julia> A = rand(SymmetricTensor{2, 2}); - -julia> ∇f = gradient(norm, A) -2×2 SymmetricTensor{2, 2, Float64, 3}: - 0.434906 0.56442 - 0.56442 0.416793 - -julia> ∇f, f = gradient(norm, A, :all); -``` -""" -function gradient(f::F, v::V) where {F, V <: Union{SecondOrderTensor, Vec, Number}} - v_dual = _load(v, Tag(f, V)) - res = f(v_dual) - return _extract_gradient(res, v) -end -function gradient(f::F, v::V, ::Symbol) where {F, V <: Union{SecondOrderTensor, Vec, Number}} - v_dual = _load(v, Tag(f, V)) - res = f(v_dual) - return _extract_gradient(res, v), _extract_value(res) -end -const ∇ = gradient - -""" - hessian(f::Function, v::Union{SecondOrderTensor, Vec, Number}) - hessian(f::Function, v::Union{SecondOrderTensor, Vec, Number}, :all) - -Computes the hessian of the input function. If the (pseudo)-keyword `all` -is given, the lower order results (gradient and value) of the function is -also returned as a second and third output argument. - -# Examples -```jldoctest -julia> A = rand(SymmetricTensor{2, 2}); - -julia> ∇∇f = hessian(norm, A) -2×2×2×2 SymmetricTensor{4, 2, Float64, 9}: -[:, :, 1, 1] = - 0.596851 -0.180684 - -0.180684 -0.133425 - -[:, :, 2, 1] = - -0.180684 0.133546 - 0.133546 -0.173159 - -[:, :, 1, 2] = - -0.180684 0.133546 - 0.133546 -0.173159 - -[:, :, 2, 2] = - -0.133425 -0.173159 - -0.173159 0.608207 - -julia> ∇∇f, ∇f, f = hessian(norm, A, :all); -``` -""" -function hessian(f::F, v::Union{SecondOrderTensor, Vec, Number}) where {F} - gradf = y -> gradient(f, y) - return gradient(gradf, v) -end - -function hessian(f::F, v::Union{SecondOrderTensor, Vec, Number}, ::Symbol) where {F} - gradf = y -> gradient(f, y) - return gradient(gradf, v), gradient(f, v, :all)... -end -const ∇∇ = hessian - -""" - divergence(f, x) - -Calculate the divergence of the vector field `f`, in the point `x`. 
- -# Examples -```jldoctest -julia> f(x) = 2x; - -julia> x = rand(Vec{3}); - -julia> divergence(f, x) -6.0 -``` -""" -divergence(f::F, v::Vec) where {F<:Function} = tr(gradient(f, v)) - -""" - curl(f, x) - -Calculate the curl of the vector field `f`, in the point `x`. - -# Examples -```jldoctest -julia> f(x) = Vec{3}((x[2], x[3], -x[1])); - -julia> x = rand(Vec{3}); - -julia> curl(f, x) -3-element Vec{3, Float64}: - -1.0 - 1.0 - -1.0 -``` -""" -function curl(f::F, v::Vec{3}) where F - @inbounds begin - ∇f = gradient(f, v) - c = Vec{3}((∇f[3,2] - ∇f[2,3], ∇f[1,3] - ∇f[3,1], ∇f[2,1] - ∇f[1,2])) - end - return c -end -curl(f::F, v::Vec{1, T}) where {F, T} = curl(f, Vec{3}((v[1], T(0), T(0)))) -curl(f::F, v::Vec{2, T}) where {F, T} = curl(f, Vec{3}((v[1], v[2], T(0)))) - -""" - laplace(f, x) - -Calculate the laplacian of the field `f`, in the point `x`. -If `f` is a vector field, use broadcasting. - -# Examples -```jldoctest -julia> x = rand(Vec{3}); - -julia> f(x) = norm(x); - -julia> laplace(f, x) -1.7833701103136868 - -julia> g(x) = x*norm(x); - -julia> laplace.(g, x) -3-element Vec{3, Float64}: - 2.107389336871036 - 2.7349658311504834 - 2.019621767876747 -``` -""" -function laplace(f::F, v) where F - return divergence(x -> gradient(f, x), v) -end -const Δ = laplace - -function Broadcast.broadcasted(::typeof(laplace), f::F, v::V) where {F, V <: Vec{3}} - @inbounds begin - tag = Tag(f, V) - vdd = _load(_load(v, tag), tag) - res = f(vdd) - v1 = res[1].partials[1].partials[1] + res[1].partials[2].partials[2] + res[1].partials[3].partials[3] - v2 = res[2].partials[1].partials[1] + res[2].partials[2].partials[2] + res[2].partials[3].partials[3] - v3 = res[3].partials[1].partials[1] + res[3].partials[2].partials[2] + res[3].partials[3].partials[3] - end - return Vec{3}((v1, v2, v3)) -end +import ForwardDiff: Dual, partials, value, Tag + +@static if isdefined(LinearAlgebra, :gradient) + import LinearAlgebra.gradient +end + +###################### +# Extraction methods # 
+###################### + +# Extractions are supposed to unpack the value and the partials +# The partials should be put into a tensor of higher order. +# The extraction methods need to know the input type to the function +# that generated the result. The reason for this is that there is no +# difference in the output type (except the number of partials) for +# norm(v) and det(T) where v is a vector and T is a second order tensor. + +#################### +# Value extraction # +#################### + +# Scalar output -> Scalar value +""" + function _extract_value(v::ForwardDiff.Dual) + function _extract_value(v::AbstractTensor{<:Any,<:Any,<:Dual}) + +Extract the non-dual part of a tensor with dual entries. This +function is useful when inserting analytical derivatives using +the [`_insert_gradient`](@ref) function +""" +@inline function _extract_value(v::Dual) + return value(v) +end +# AbstractTensor output -> AbstractTensor gradient +@generated function _extract_value(v::AbstractTensor{<:Any,<:Any,<:Dual}) + TensorType = get_base(v) + ex = Expr(:tuple) + for i in 1:n_components(TensorType) + # Can use linear indexing even for SymmetricTensor + # when indexing the underlying tuple + push!(ex.args, :(value(get_data(v)[$i]))) + end + quote + $(Expr(:meta, :inline)) + @inbounds return $TensorType($ex) + end +end + +####################### +# Gradient extraction # +####################### + +# Scalar output, Scalar input -> Scalar gradient +@inline function _extract_gradient(v::Dual, ::Number) + return @inbounds partials(v)[1] +end +# Vec, Tensor{2/4}, SymmetricTensor{2/4} output, Scalar input -> Vec, Tensor{2/4}, SymmetricTensor{2/4} gradient +@generated function _extract_gradient(v::AbstractTensor{<:Any,<:Any,<:Dual}, ::Number) + TensorType = get_base(v) + ex = Expr(:tuple) + for i in 1:n_components(TensorType) + # Can use linear indexing even for SymmetricTensor + # when indexing the underlying tuple + push!(ex.args, :(partials(get_data(v)[$i])[1])) + end + quote + 
$(Expr(:meta, :inline)) + @inbounds return $TensorType($ex) + end +end +# Scalar output, Vec input -> Vec gradient +@inline function _extract_gradient(v::Dual, ::Vec{N}) where {N} + return Vec{N}(partials(v).values) +end +# Scalar output, SecondOrderTensor input -> SecondOrderTensor gradient +@inline function _extract_gradient(v::Dual, ::SymmetricTensor{2, dim}) where {dim} + return SymmetricTensor{2, dim}(partials(v).values) +end +@inline function _extract_gradient(v::Dual, ::Tensor{2, dim}) where {dim} + return Tensor{2, dim}(partials(v).values) +end + +# Vec output, Vec input -> Tensor{2} gradient +@inline function _extract_gradient(v::Vec{1, <: Dual}, ::Vec{1}) + @inbounds begin + p1 = partials(v[1]) + ∇f = Tensor{2, 1}((p1[1],)) + end + return ∇f +end +# Vec output, Vec input -> Tensor{2} gradient +@inline function _extract_gradient(v::Vec{2, <: Dual}, ::Vec{2}) + @inbounds begin + p1, p2 = partials(v[1]), partials(v[2]) + ∇f = Tensor{2, 2}((p1[1], p2[1], p1[2], p2[2])) + end + return ∇f +end +# Vec output, Vec input -> Tensor{2} gradient +@inline function _extract_gradient(v::Vec{3, <: Dual}, ::Vec{3}) + @inbounds begin + p1, p2, p3 = partials(v[1]), partials(v[2]), partials(v[3]) + ∇f = Tensor{2, 3}((p1[1], p2[1], p3[1], p1[2], p2[2], p3[2], p1[3], p2[3], p3[3])) + end + return ∇f +end + +# Tensor{2} output, Tensor{2} input -> Tensor{4} gradient +@inline function _extract_gradient(v::Tensor{2, 1, <: Dual}, ::Tensor{2, 1}) + @inbounds begin + p1 = partials(v[1,1]) + ∇f = Tensor{4, 1}((p1[1],)) + end + return ∇f +end +# SymmetricTensor{2} output, SymmetricTensor{2} input -> SymmetricTensor{4} gradient +@inline function _extract_gradient(v::SymmetricTensor{2, 1, <: Dual}, ::SymmetricTensor{2, 1}) + @inbounds begin + p1 = partials(v[1,1]) + ∇f = SymmetricTensor{4, 1}((p1[1],)) + end + return ∇f +end +# Tensor{2} output, Vec input -> Tensor{3} gradient +@inline function _extract_gradient(v::Tensor{2, 1, <: Dual}, ::Vec{1}) + @inbounds begin + p1 = 
partials(v[1,1]) + ∇f = Tensor{3, 1}((p1[1],)) + end + return ∇f +end +# Tensor{2} output, Tensor{2} input -> Tensor{4} gradient +@inline function _extract_gradient(v::Tensor{2, 2, <: Dual}, ::Tensor{2, 2}) + @inbounds begin + p1, p2, p3, p4 = partials(v[1,1]), partials(v[2,1]), partials(v[1,2]), partials(v[2,2]) + ∇f = Tensor{4, 2}((p1[1], p2[1], p3[1], p4[1], + p1[2], p2[2], p3[2], p4[2], + p1[3], p2[3], p3[3], p4[3], + p1[4], p2[4], p3[4], p4[4])) + end + return ∇f +end +# SymmetricTensor{2} output, SymmetricTensor{2} input -> SymmetricTensor{4} gradient +@inline function _extract_gradient(v::SymmetricTensor{2, 2, <: Dual}, ::SymmetricTensor{2, 2}) + @inbounds begin + p1, p2, p3 = partials(v[1,1]), partials(v[2,1]), partials(v[2,2]) + ∇f = SymmetricTensor{4, 2}((p1[1], p2[1], p3[1], + p1[2], p2[2], p3[2], + p1[3], p2[3], p3[3])) + end + return ∇f +end +# Tensor{2} output, Vec input -> Tensor{3} gradient +@inline function _extract_gradient(v::Tensor{2, 2, <: Dual}, ::Vec{2}) + @inbounds begin + p1, p2, p3, p4 = partials(v[1,1]), partials(v[2,1]), partials(v[1,2]), partials(v[2,2]) + ∇f = Tensor{3, 2}((p1[1], p2[1], p3[1], p4[1], + p1[2], p2[2], p3[2], p4[2])) + end + return ∇f +end +# Tensor{2} output, Tensor{2} input -> Tensor{4} gradient +@inline function _extract_gradient(v::Tensor{2, 3, <: Dual}, ::Tensor{2, 3}) + @inbounds begin + p1, p2, p3 = partials(v[1,1]), partials(v[2,1]), partials(v[3,1]) + p4, p5, p6 = partials(v[1,2]), partials(v[2,2]), partials(v[3,2]) + p7, p8, p9 = partials(v[1,3]), partials(v[2,3]), partials(v[3,3]) + ∇f = Tensor{4, 3}((p1[1], p2[1], p3[1], p4[1], p5[1], p6[1], p7[1], p8[1], p9[1], + p1[2], p2[2], p3[2], p4[2], p5[2], p6[2], p7[2], p8[2], p9[2], # ### # + p1[3], p2[3], p3[3], p4[3], p5[3], p6[3], p7[3], p8[3], p9[3], # # # # + p1[4], p2[4], p3[4], p4[4], p5[4], p6[4], p7[4], p8[4], p9[4], ### ### ### + p1[5], p2[5], p3[5], p4[5], p5[5], p6[5], p7[5], p8[5], p9[5], + p1[6], p2[6], p3[6], p4[6], p5[6], p6[6], p7[6], p8[6], p9[6], 
+ p1[7], p2[7], p3[7], p4[7], p5[7], p6[7], p7[7], p8[7], p9[7], + p1[8], p2[8], p3[8], p4[8], p5[8], p6[8], p7[8], p8[8], p9[8], + p1[9], p2[9], p3[9], p4[9], p5[9], p6[9], p7[9], p8[9], p9[9])) + end + return ∇f +end +# SymmetricTensor{2} output, SymmetricTensor{2} input -> SymmetricTensor{4} gradient +@inline function _extract_gradient(v::SymmetricTensor{2, 3, <: Dual}, ::SymmetricTensor{2, 3}) + @inbounds begin + p1, p2, p3 = partials(v[1,1]), partials(v[2,1]), partials(v[3,1]) + p4, p5, p6 = partials(v[2,2]), partials(v[3,2]), partials(v[3,3]) + ∇f = SymmetricTensor{4, 3}((p1[1], p2[1], p3[1], p4[1], p5[1], p6[1], + p1[2], p2[2], p3[2], p4[2], p5[2], p6[2], + p1[3], p2[3], p3[3], p4[3], p5[3], p6[3], + p1[4], p2[4], p3[4], p4[4], p5[4], p6[4], + p1[5], p2[5], p3[5], p4[5], p5[5], p6[5], + p1[6], p2[6], p3[6], p4[6], p5[6], p6[6])) + end + return ∇f +end + +# Tensor{2} output, Vec input -> Tensor{3} gradient +@inline function _extract_gradient(v::Tensor{2, 3, <: Dual}, ::Vec{3}) + @inbounds begin + p1, p2, p3 = partials(v[1,1]), partials(v[2,1]), partials(v[3,1]) + p4, p5, p6 = partials(v[1,2]), partials(v[2,2]), partials(v[3,2]) + p7, p8, p9 = partials(v[1,3]), partials(v[2,3]), partials(v[3,3]) + ∇f = Tensor{3, 3}((p1[1], p2[1], p3[1], p4[1], p5[1], p6[1], p7[1], p8[1], p9[1], + p1[2], p2[2], p3[2], p4[2], p5[2], p6[2], p7[2], p8[2], p9[2], + p1[3], p2[3], p3[3], p4[3], p5[3], p6[3], p7[3], p8[3], p9[3])) + end + return ∇f +end + +# for non dual variable +@inline function _extract_value(v::Any) + return v +end +for TensorType in (Tensor, SymmetricTensor) + @eval begin + @inline function _extract_gradient(v::T, x::$TensorType{order, dim}) where {T<:Real, order, dim} + zero($TensorType{order, dim, T}) + end + @generated function _extract_gradient(v::$TensorType{order, dim, T}, ::$TensorType{order, dim}) where {T<:Real, order, dim} + RetType = $TensorType{order+order, dim, T} + return quote + $(Expr(:meta, :inline)) + zero($RetType) + end + end + end +end + 
+###################### +# Gradient insertion # +###################### + +# Insertions get the real value and derivative of a function, as well +# a tensor of dual values that was the initial input to that function. +# A new tensor of dual values are then created, to emulate the function +# being run with dual numbers (i.e. inserting the analytical gradient) +# As opposed to with gradient extraction, we don't have the original input +# (scalar or tensor) to the gradient function. But we can create this based +# on the tag created in the `gradient` function. +# Specifically, consider a function y=f(g(x)) where we want to supply the +# derivative df/dg (at g(x)). We then have "dy/dx = df/dg dg/dx" where +# the type of product is given by the type of g: +# g is 0th order: open product ^1 +# g is 1st order: single contraction +# g is 2nd order: double contraction +# +# ^1: Regular multiplication for scalars, but in case x and f +# are vectors, then it is open product. +# +# Support is given for the following function configurations +# g (input) f (output) dfdg (derivative) +# 2nd order 0th order 2nd order +# 1st order 1st order 2nd order +# 2nd order 2nd order 4th order + +# First, we define the API macro used to supply the analytical derivative +""" + @implement_gradient(f, f_dfdx) + +This macro allows specifying a function `f_dfdx` that provides an analytical +derivative of the function `f`, and is invoked when `f` is differentiated +using automatic differentiation based on `ForwardDiff.jl` +(e.g. when using `Tensors.jl`'s +[`gradient`](@ref) or [`hessian`](@ref)), or one of `ForwardDiff.jl`'s API). +The function `f_dfdx` must take +the same argument as `f` and should return both the value of `f` and +the gradient, i.e. `fval, dfdx_val = f_dfdx(x)`. 
The following combinations +of input and output types are supported: + +| `x` | `f(x)` | `dfdx` | +|:--------------------|:--------------------|:--------------------| +| `Number` | `Number` | `Number` | +| `Number` | `Vec` | `Vec` | +| `Number` | `SecondOrderTensor` | `SecondOrderTensor` | +| `Vec` | `Number` | `Vec` | +| `Vec` | `Vec` | `Tensor{2}` | +| `SecondOrderTensor` | `Number` | `SecondOrderTensor` | +| `SecondOrderTensor` | `SecondOrderTensor` | `FourthOrderTensor` | + +Note that if one tensor if of symmetric type, then all tensors must +be of symmetric type + +""" +macro implement_gradient(f, f_dfdx) + return :($(esc(f))(x :: Union{AbstractTensor{<:Any, <:Any, <:Dual}, Dual}) = _propagate_gradient($(esc(f_dfdx)), x)) +end +# which calls the general function _propagate_gradient that calls the specialized _insert_gradient method below +function _propagate_gradient(f_dfdx::Function, x::Union{AbstractTensor{<:Any, <:Any, <:Dual}, Dual}) + fval, dfdx_val = f_dfdx(_extract_value(x)) + return _insert_gradient(fval, dfdx_val, x) +end + +# Define the _insert_gradient method +""" + _insert_gradient(f::Union{Number,AbstractTensor}, dfdg::Union{Number,AbstractTensor}, g::ForwardDiff.Dual) + _insert_gradient(f::Union{Number,AbstractTensor}, dfdg::Union{Number,AbstractTensor}, g::Vec{<:Any,<:ForwardDiff.Dual}) + _insert_gradient(f::Union{Number,AbstractTensor}, dfdg::Union{Number,AbstractTensor}, g::SecondOrderTensor{<:Any,<:ForwardDiff.Dual}) + +Allows inserting an analytical gradient for use with automatic differentiation. +Consider a composed function ``h(f(g(x)))``, where you have an efficient way to +calculate ``\\partial f/\\partial g``, but want to use automatic +differentiation for the other functions. Then, you can make another definition +of ``f(g)`` to dispatch on if ``g`` is a tensor with `ForwardDiff.Dual` +entires, i.e. 
+```julia +function f(g::Tensor{2,dim,T}) where{dim, T<:ForwardDiff.Dual} + gval = _extract_value(g) # Get the non-dual tensor value + fval = f(gval) # Calculate function value + dfdg = dfdg_analytical(fval, gval) # Calculate analytical derivative + return _insert_gradient(fval, dfdg, g) # Return the updated dual tensor +end +``` + +""" +function _insert_gradient(f::Union{Number,AbstractTensor}, dfdg::Union{Number,AbstractTensor}, g::Dual{Tg}) where{Tg} + dgdx = _extract_gradient(g, _get_original_gradient_input(g)) + dfdx = dfdg ⊗ dgdx + return _insert_full_gradient(f, dfdx, Tg()) +end + +function _insert_gradient(f::Union{Number,AbstractTensor}, dfdg::Union{Number,AbstractTensor}, g::Vec{<:Any, <:Dual{Tg}}) where{Tg} + dgdx = _extract_gradient(g, _get_original_gradient_input(g)) + dfdx = dfdg ⋅ dgdx + return _insert_full_gradient(f, dfdx, Tg()) +end + +function _insert_gradient(f::Union{Number,AbstractTensor}, dfdg::Union{Number,AbstractTensor}, g::SecondOrderTensor{<:Any,<:Dual{Tg}}) where{Tg} + dgdx = _extract_gradient(g, _get_original_gradient_input(g)) + dfdx = dfdg ⊡ dgdx + return _insert_full_gradient(f, dfdx, Tg()) +end + +# Define helper function to figure out original input to gradient function +_get_original_gradient_input(::Dual{Tag{Tf,Tv}}) where{Tf,Tv} = zero(Tv) +_get_original_gradient_input(::AbstractTensor{<:Any,<:Any,<:Dual{Tag{Tf,Tv}}}) where{Tf,Tv} = zero(Tv) + +# Define helper function to insert_the_full_gradient calculated in _insert_gradient +_insert_full_gradient(f::Number, dfdx::Number, ::Tg) where{Tg} = Dual{Tg}(f, dfdx) +_insert_full_gradient(f::Number, dfdx::AbstractTensor, ::Tg) where{Tg} = Dual{Tg}(f, get_data(dfdx)) + +function _insert_full_gradient(f::TT, dfdx::TT, ::Tg) where{TT<:AbstractTensor,Tg} + fdata = get_data(f) + diffdata = get_data(dfdx) + TTb = get_base(TT) + @inbounds y = TTb(ntuple(i -> Dual{Tg}(fdata[i], diffdata[i]), length(fdata))) + return y +end + +function _insert_full_gradient(f::Vec{dim}, dfdx::Tensor{2,dim}, 
::Tg) where{dim, Tg} + fdata = get_data(f) + diffdata = get_data(dfdx) + @inbounds y = Vec{dim}(i -> Dual{Tg}(fdata[i], ntuple(j->diffdata[i+dim*(j-1)], dim))) + return y +end + +function _insert_full_gradient(f::Tensor{2,dim,<:Any,N}, dfdx::Tensor{4,dim}, ::Tg) where{dim, N, Tg} + fdata = get_data(f) + diffdata = get_data(dfdx) + @inbounds y = Tensor{2,dim}(ntuple(i->Dual{Tg}(fdata[i], ntuple(j->diffdata[i+N*(j-1)],N)), N)) + return y +end +function _insert_full_gradient(f::SymmetricTensor{2,dim,<:Any,N}, dfdx::SymmetricTensor{4,dim}, ::Tg) where{dim, N, Tg} + fdata = get_data(f) + diffdata = get_data(dfdx) + @inbounds y = SymmetricTensor{2,dim}(ntuple(i->Dual{Tg}(fdata[i], ntuple(j->diffdata[i+N*(j-1)],N)), N)) + return y +end + + +################## +# Load functions # +################## + +# Loaders are supposed to take a tensor of real values and convert it +# into a tensor of dual values where the seeds are correctly defined. + +@inline function _load(v::Number, ::Tg) where Tg + return Dual{Tg}(v, one(v)) +end + +@inline function _load(v::Vec{1, T}, ::Tg) where {T, Tg} + @inbounds v_dual = Vec{1}((Dual{Tg}(v[1], one(T)),)) + return v_dual +end + +@inline function _load(v::Vec{2, T}, ::Tg) where {T, Tg} + o = one(T) + z = zero(T) + @inbounds v_dual = Vec{2}((Dual{Tg}(v[1], o, z), + Dual{Tg}(v[2], z, o))) + return v_dual +end + +@inline function _load(v::Vec{3, T}, ::Tg) where {T, Tg} + o = one(T) + z = zero(T) + @inbounds v_dual = Vec{3}((Dual{Tg}(v[1], o, z, z), + Dual{Tg}(v[2], z, o, z), + Dual{Tg}(v[3], z, z, o))) + return v_dual +end + +# Second order tensors +@inline function _load(v::Tensor{2, 1, T}, ::Tg) where {T, Tg} + @inbounds v_dual = Tensor{2, 1}((Dual{Tg}(get_data(v)[1], one(T)),)) + return v_dual +end + +@inline function _load(v::SymmetricTensor{2, 1, T}, ::Tg) where {T, Tg} + @inbounds v_dual = SymmetricTensor{2, 1}((Dual{Tg}(get_data(v)[1], one(T)),)) + return v_dual +end + +@inline function _load(v::Tensor{2, 2, T}, ::Tg) where {T, Tg} + 
data = get_data(v) + o = one(T) + z = zero(T) + @inbounds v_dual = Tensor{2, 2}((Dual{Tg}(data[1], o, z, z, z), + Dual{Tg}(data[2], z, o, z, z), + Dual{Tg}(data[3], z, z, o, z), + Dual{Tg}(data[4], z, z, z, o))) + return v_dual +end + +@inline function _load(v::SymmetricTensor{2, 2, T}, ::Tg) where {T, Tg} + data = get_data(v) + o = one(T) + o2 = convert(T, 1/2) + z = zero(T) + @inbounds v_dual = SymmetricTensor{2, 2}((Dual{Tg}(data[1], o, z, z), + Dual{Tg}(data[2], z, o2, z), + Dual{Tg}(data[3], z, z, o))) + return v_dual +end + +@inline function _load(v::Tensor{2, 3, T}, ::Tg) where {T, Tg} + data = get_data(v) + o = one(T) + z = zero(T) + @inbounds v_dual = Tensor{2, 3}((Dual{Tg}(data[1], o, z, z, z, z, z, z, z, z), + Dual{Tg}(data[2], z, o, z, z, z, z, z, z, z), + Dual{Tg}(data[3], z, z, o, z, z, z, z, z, z), + Dual{Tg}(data[4], z, z, z, o, z, z, z, z, z), + Dual{Tg}(data[5], z, z, z, z, o, z, z, z, z), + Dual{Tg}(data[6], z, z, z, z, z, o, z, z, z), + Dual{Tg}(data[7], z, z, z, z, z, z, o, z, z), + Dual{Tg}(data[8], z, z, z, z, z, z, z, o, z), + Dual{Tg}(data[9], z, z, z, z, z, z, z, z, o))) + return v_dual +end + +@inline function _load(v::SymmetricTensor{2, 3, T}, ::Tg) where {T, Tg} + data = get_data(v) + o = one(T) + o2 = convert(T, 1/2) + z = zero(T) + @inbounds v_dual = SymmetricTensor{2, 3}((Dual{Tg}(data[1], o, z, z, z, z, z), + Dual{Tg}(data[2], z, o2, z, z, z, z), + Dual{Tg}(data[3], z, z, o2, z, z, z), + Dual{Tg}(data[4], z, z, z, o, z, z), + Dual{Tg}(data[5], z, z, z, z, o2, z), + Dual{Tg}(data[6], z, z, z, z, z, o))) + return v_dual +end + +""" + gradient(f::Function, v::Union{SecondOrderTensor, Vec, Number}) + gradient(f::Function, v::Union{SecondOrderTensor, Vec, Number}, :all) + +Computes the gradient of the input function. If the (pseudo)-keyword `all` +is given, the value of the function is also returned as a second output argument. 
+ +# Examples +```jldoctest +julia> A = rand(SymmetricTensor{2, 2}); + +julia> ∇f = gradient(norm, A) +2×2 SymmetricTensor{2, 2, Float64, 3}: + 0.434906 0.56442 + 0.56442 0.416793 + +julia> ∇f, f = gradient(norm, A, :all); +``` +""" +function gradient(f::F, v::V) where {F, V <: Union{SecondOrderTensor, Vec, Number}} + v_dual = _load(v, Tag(f, V)) + res = f(v_dual) + return _extract_gradient(res, v) +end +function gradient(f::F, v::V, ::Symbol) where {F, V <: Union{SecondOrderTensor, Vec, Number}} + v_dual = _load(v, Tag(f, V)) + res = f(v_dual) + return _extract_gradient(res, v), _extract_value(res) +end +const ∇ = gradient + +""" + hessian(f::Function, v::Union{SecondOrderTensor, Vec, Number}) + hessian(f::Function, v::Union{SecondOrderTensor, Vec, Number}, :all) + +Computes the hessian of the input function. If the (pseudo)-keyword `all` +is given, the lower order results (gradient and value) of the function is +also returned as a second and third output argument. + +# Examples +```jldoctest +julia> A = rand(SymmetricTensor{2, 2}); + +julia> ∇∇f = hessian(norm, A) +2×2×2×2 SymmetricTensor{4, 2, Float64, 9}: +[:, :, 1, 1] = + 0.596851 -0.180684 + -0.180684 -0.133425 + +[:, :, 2, 1] = + -0.180684 0.133546 + 0.133546 -0.173159 + +[:, :, 1, 2] = + -0.180684 0.133546 + 0.133546 -0.173159 + +[:, :, 2, 2] = + -0.133425 -0.173159 + -0.173159 0.608207 + +julia> ∇∇f, ∇f, f = hessian(norm, A, :all); +``` +""" +function hessian(f::F, v::Union{SecondOrderTensor, Vec, Number}) where {F} + gradf = y -> gradient(f, y) + return gradient(gradf, v) +end + +function hessian(f::F, v::Union{SecondOrderTensor, Vec, Number}, ::Symbol) where {F} + gradf = y -> gradient(f, y) + return gradient(gradf, v), gradient(f, v, :all)... +end +const ∇∇ = hessian + +""" + divergence(f, x) + +Calculate the divergence of the vector field `f`, in the point `x`. 
+ +# Examples +```jldoctest +julia> f(x) = 2x; + +julia> x = rand(Vec{3}); + +julia> divergence(f, x) +6.0 +``` +""" +divergence(f::F, v::Vec) where {F<:Function} = tr(gradient(f, v)) + +""" + curl(f, x) + +Calculate the curl of the vector field `f`, in the point `x`. + +# Examples +```jldoctest +julia> f(x) = Vec{3}((x[2], x[3], -x[1])); + +julia> x = rand(Vec{3}); + +julia> curl(f, x) +3-element Vec{3, Float64}: + -1.0 + 1.0 + -1.0 +``` +""" +function curl(f::F, v::Vec{3}) where F + @inbounds begin + ∇f = gradient(f, v) + c = Vec{3}((∇f[3,2] - ∇f[2,3], ∇f[1,3] - ∇f[3,1], ∇f[2,1] - ∇f[1,2])) + end + return c +end +curl(f::F, v::Vec{1, T}) where {F, T} = curl(f, Vec{3}((v[1], T(0), T(0)))) +curl(f::F, v::Vec{2, T}) where {F, T} = curl(f, Vec{3}((v[1], v[2], T(0)))) + +""" + laplace(f, x) + +Calculate the laplacian of the field `f`, in the point `x`. +If `f` is a vector field, use broadcasting. + +# Examples +```jldoctest +julia> x = rand(Vec{3}); + +julia> f(x) = norm(x); + +julia> laplace(f, x) +1.7833701103136868 + +julia> g(x) = x*norm(x); + +julia> laplace.(g, x) +3-element Vec{3, Float64}: + 2.107389336871036 + 2.7349658311504834 + 2.019621767876747 +``` +""" +function laplace(f::F, v) where F + return divergence(x -> gradient(f, x), v) +end +const Δ = laplace + +function Broadcast.broadcasted(::typeof(laplace), f::F, v::V) where {F, V <: Vec{3}} + @inbounds begin + tag = Tag(f, V) + vdd = _load(_load(v, tag), tag) + res = f(vdd) + v1 = res[1].partials[1].partials[1] + res[1].partials[2].partials[2] + res[1].partials[3].partials[3] + v2 = res[2].partials[1].partials[1] + res[2].partials[2].partials[2] + res[2].partials[3].partials[3] + v3 = res[3].partials[1].partials[1] + res[3].partials[2].partials[2] + res[3].partials[3].partials[3] + end + return Vec{3}((v1, v2, v3)) +end diff --git a/src/basic_operations.jl b/src/basic_operations.jl index adfcf24a..d1e5bb14 100644 --- a/src/basic_operations.jl +++ b/src/basic_operations.jl @@ -1,65 +1,65 @@ 
-############### -# Simple Math # -############### - -# Unary -@inline Base.:+(S::AbstractTensor) = S -@inline Base.:-(S::AbstractTensor) = _map(-, S) - -# Binary -@inline Base.:+(S1::Tensor{order, dim}, S2::Tensor{order, dim}) where {order, dim} = _map(+, S1, S2) -@inline Base.:+(S1::SymmetricTensor{order, dim}, S2::SymmetricTensor{order, dim}) where {order, dim} = _map(+, S1, S2) -@inline Base.:+(S1::AbstractTensor{order, dim}, S2::AbstractTensor{order, dim}) where {order, dim} = ((SS1, SS2) = promote_base(S1, S2); SS1 + SS2) - -@inline Base.:-(S1::Tensor{order, dim}, S2::Tensor{order, dim}) where {order, dim} = _map(-, S1, S2) -@inline Base.:-(S1::SymmetricTensor{order, dim}, S2::SymmetricTensor{order, dim}) where {order, dim} = _map(-, S1, S2) -@inline Base.:-(S1::AbstractTensor{order, dim}, S2::AbstractTensor{order, dim}) where {order, dim} = ((SS1, SS2) = promote_base(S1, S2); SS1 - SS2) - -@inline Base.:*(S::AbstractTensor, n::Number) = _map(x -> x*n, S) -@inline Base.:*(n::Number, S::AbstractTensor) = _map(x -> n*x, S) -@inline Base.:/(S::AbstractTensor, n::Number) = _map(x -> x/n, S) - -Base.:+(S1::AbstractTensor, S2::AbstractTensor) = throw(DimensionMismatch("dimension and order must match")) -Base.:-(S1::AbstractTensor, S2::AbstractTensor) = throw(DimensionMismatch("dimension and order must match")) - -# map implementations -@inline function _map(f, S::AbstractTensor) - return apply_all(S, @inline function(i) @inbounds f(S.data[i]); end) -end - -# the caller of 2 arg _map MUST guarantee that both arguments have -# the same base (Tensor{order, dim} / SymmetricTensor{order, dim}) but not necessarily the same eltype -@inline function _map(f, S1::AllTensors, S2::AllTensors) - return apply_all(S1, @inline function(i) @inbounds f(S1.data[i], S2.data[i]); end) -end - -# power -@inline Base.literal_pow(::typeof(^), S::SecondOrderTensor, ::Val{-1}) = inv(S) -@inline Base.literal_pow(::typeof(^), S::SecondOrderTensor, ::Val{0}) = one(S) -@inline 
Base.literal_pow(::typeof(^), S::SecondOrderTensor, ::Val{1}) = S -@inline function Base.literal_pow(::typeof(^), S1::SecondOrderTensor, ::Val{p}) where {p} - p > 0 ? (S2 = S1; q = p) : (S2 = inv(S1); q = -p) - S3 = S2 - for i in 2:q - S2 = _powdot(S2, S3) - end - S2 -end - -@inline _powdot(S1::Tensor, S2::Tensor) = dot(S1, S2) -@generated function _powdot(S1::SymmetricTensor{2, dim}, S2::SymmetricTensor{2, dim}) where {dim} - idxS1(i, j) = compute_index(get_base(S1), i, j) - idxS2(i, j) = compute_index(get_base(S2), i, j) - exps = Expr(:tuple) - for j in 1:dim, i in j:dim - ex1 = Expr[:(get_data(S1)[$(idxS1(i, k))]) for k in 1:dim] - ex2 = Expr[:(get_data(S2)[$(idxS2(k, j))]) for k in 1:dim] - push!(exps.args, reducer(ex1, ex2)) - end - quote - $(Expr(:meta, :inline)) - @inbounds return SymmetricTensor{2, dim}($exps) - end -end - +############### +# Simple Math # +############### + +# Unary +@inline Base.:+(S::AbstractTensor) = S +@inline Base.:-(S::AbstractTensor) = _map(-, S) + +# Binary +@inline Base.:+(S1::Tensor{order, dim}, S2::Tensor{order, dim}) where {order, dim} = _map(+, S1, S2) +@inline Base.:+(S1::SymmetricTensor{order, dim}, S2::SymmetricTensor{order, dim}) where {order, dim} = _map(+, S1, S2) +@inline Base.:+(S1::AbstractTensor{order, dim}, S2::AbstractTensor{order, dim}) where {order, dim} = ((SS1, SS2) = promote_base(S1, S2); SS1 + SS2) + +@inline Base.:-(S1::Tensor{order, dim}, S2::Tensor{order, dim}) where {order, dim} = _map(-, S1, S2) +@inline Base.:-(S1::SymmetricTensor{order, dim}, S2::SymmetricTensor{order, dim}) where {order, dim} = _map(-, S1, S2) +@inline Base.:-(S1::AbstractTensor{order, dim}, S2::AbstractTensor{order, dim}) where {order, dim} = ((SS1, SS2) = promote_base(S1, S2); SS1 - SS2) + +@inline Base.:*(S::AbstractTensor, n::Number) = _map(x -> x*n, S) +@inline Base.:*(n::Number, S::AbstractTensor) = _map(x -> n*x, S) +@inline Base.:/(S::AbstractTensor, n::Number) = _map(x -> x/n, S) + +Base.:+(S1::AbstractTensor, 
S2::AbstractTensor) = throw(DimensionMismatch("dimension and order must match")) +Base.:-(S1::AbstractTensor, S2::AbstractTensor) = throw(DimensionMismatch("dimension and order must match")) + +# map implementations +@inline function _map(f, S::AbstractTensor) + return apply_all(S, @inline function(i) @inbounds f(S.data[i]); end) +end + +# the caller of 2 arg _map MUST guarantee that both arguments have +# the same base (Tensor{order, dim} / SymmetricTensor{order, dim}) but not necessarily the same eltype +@inline function _map(f, S1::AllTensors, S2::AllTensors) + return apply_all(S1, @inline function(i) @inbounds f(S1.data[i], S2.data[i]); end) +end + +# power +@inline Base.literal_pow(::typeof(^), S::SecondOrderTensor, ::Val{-1}) = inv(S) +@inline Base.literal_pow(::typeof(^), S::SecondOrderTensor, ::Val{0}) = one(S) +@inline Base.literal_pow(::typeof(^), S::SecondOrderTensor, ::Val{1}) = S +@inline function Base.literal_pow(::typeof(^), S1::SecondOrderTensor, ::Val{p}) where {p} + p > 0 ? (S2 = S1; q = p) : (S2 = inv(S1); q = -p) + S3 = S2 + for i in 2:q + S2 = _powdot(S2, S3) + end + S2 +end + +@inline _powdot(S1::Tensor, S2::Tensor) = dot(S1, S2) +@generated function _powdot(S1::SymmetricTensor{2, dim}, S2::SymmetricTensor{2, dim}) where {dim} + idxS1(i, j) = compute_index(get_base(S1), i, j) + idxS2(i, j) = compute_index(get_base(S2), i, j) + exps = Expr(:tuple) + for j in 1:dim, i in j:dim + ex1 = Expr[:(get_data(S1)[$(idxS1(i, k))]) for k in 1:dim] + ex2 = Expr[:(get_data(S2)[$(idxS2(k, j))]) for k in 1:dim] + push!(exps.args, reducer(ex1, ex2)) + end + quote + $(Expr(:meta, :inline)) + @inbounds return SymmetricTensor{2, dim}($exps) + end +end + Base.iszero(a::AbstractTensor) = all(iszero, get_data(a)) \ No newline at end of file diff --git a/src/constructors.jl b/src/constructors.jl index 7a48fcca..8e06381c 100644 --- a/src/constructors.jl +++ b/src/constructors.jl @@ -1,171 +1,171 @@ -# Type constructors e.g. 
Tensor{2, 3}(arg) - -# Tensor from function -@generated function (S::Union{Type{Tensor{order, dim}}, Type{SymmetricTensor{order, dim}}})(f::Function) where {order, dim} - TensorType = get_base(get_type(S)) - if order == 1 - exp = tensor_create(TensorType, (i) -> :(f($i))) - elseif order == 2 - exp = tensor_create(TensorType, (i,j) -> :(f($i, $j))) - elseif order == 3 - exp = tensor_create(TensorType, (i,j,k) -> :(f($i, $j, $k))) - elseif order == 4 - exp = tensor_create(TensorType, (i,j,k,l) -> :(f($i, $j, $k, $l))) - end - quote - $(Expr(:meta, :inline)) - @inbounds return $TensorType($exp) - end -end - -# Applies the function f to all indices f(1), f(2), ... f(n_independent_components) -@generated function apply_all(S::Union{Type{Tensor{order, dim}}, Type{SymmetricTensor{order, dim}}}, f::Function) where {order, dim} - TensorType = get_base(get_type(S)) - exp = tensor_create_linear(TensorType, (i) -> :(f($i))) - quote - $(Expr(:meta, :inline)) - @inbounds return $TensorType($exp) - end -end - -@inline function apply_all(S::Union{Tensor{order, dim}, SymmetricTensor{order, dim}}, f::Function) where {order, dim} - apply_all(get_base(typeof(S)), f) -end - -# Tensor from AbstractArray -function Tensor{order, dim}(data::AbstractArray) where {order, dim} - N = n_components(Tensor{order, dim}) - length(data) != n_components(Tensor{order, dim}) && throw(ArgumentError("wrong number of elements, expected $N, got $(length(data))")) - return apply_all(Tensor{order, dim}, @inline function(i) @inbounds data[i]; end) -end - - -# SymmetricTensor from AbstractArray -@generated function SymmetricTensor{order, dim}(data::AbstractArray) where {order, dim} - N = n_components(Tensor{order,dim}) - expN = Expr(:tuple, [:(data[$i]) for i in 1:N]...) - M = n_components(SymmetricTensor{order,dim}) - expM = Expr(:tuple, [:(data[$i]) for i in 1:M]...) 
- return quote - L = length(data) - if L != $N && L != $M - throw(ArgumentError("wrong number of vector elements, expected $($N) or $($M), got $L")) - end - if L == $M - @inbounds return SymmetricTensor{order, dim}($expM) - end - @inbounds S = Tensor{order, dim}($expN) - return convert(SymmetricTensor{order, dim}, S) - end -end - -# one (identity tensor) -for TensorType in (SymmetricTensor, Tensor) - @eval begin - @inline Base.one(::Type{$(TensorType){order, dim}}) where {order, dim} = one($TensorType{order, dim, Float64}) - @inline Base.one(::Type{$(TensorType){order, dim, T, M}}) where {order, dim, T, M} = one($TensorType{order, dim, T}) - @inline Base.one(::$TensorType{order, dim, T}) where {order, dim, T} = one($TensorType{order, dim, T}) - - @generated function Base.one(S::Type{$(TensorType){order, dim, T}}) where {order, dim, T} - !(order in (2,4)) && throw(ArgumentError("`one` only defined for order 2 and 4")) - δ = (i,j) -> i == j ? :(o) : :(z) - ReturnTensor = get_base(get_type(S)) - if order == 2 - f = (i,j) -> :($(δ(i,j))) - elseif order == 4 && $TensorType == Tensor - f = (i,j,k,l) -> :($(δ(i,k)) * $(δ(j,l))) - else # order == 4 && TensorType == SymmetricTensor - f = (i,j,k,l) -> :(($(δ(i,k)) * $(δ(j,l)) + $(δ(i,l))* $(δ(j,k))) / 2) - end - exp = tensor_create(ReturnTensor, f) - return quote - $(Expr(:meta, :inline)) - o = one(T) - z = zero(o) # zero-no-unit(T) - $ReturnTensor($exp) - end - end - end -end - -# zero, one, rand -for (op, el) in ((:zero, :(zero(T))), (:ones, :(one(T))), (:rand, :(()->rand(T))), (:randn,:(()->randn(T)))) -for TensorType in (SymmetricTensor, Tensor) - @eval begin - @inline Base.$op(::Type{$TensorType{order, dim}}) where {order, dim} = $op($TensorType{order, dim, Float64}) - @inline Base.$op(::Type{$TensorType{order, dim, T, N}}) where {order, dim, T, N} = $op($TensorType{order, dim, T}) - @inline Base.$op(::Type{$TensorType{order, dim, T}}) where {order, dim, T} = fill($el, $TensorType{order, dim}) - end -end -@eval @inline 
Base.$op(S::Type{Vec{dim}}) where {dim} = $op(Vec{dim, Float64}) -@eval @inline Base.$op(t::AllTensors) = $op(typeof(t)) -end - -@inline Base.fill(el::Number, S::Type{T}) where {T <: Union{Tensor, SymmetricTensor}} = apply_all(get_base(T), i -> el) -@inline Base.fill(f::Function, S::Type{T}) where {T <: Union{Tensor, SymmetricTensor}} = apply_all(get_base(T), i -> f()) - -# Array with zero/ones -@inline Base.zeros(::Type{T}, dims::Int...) where {T <: Union{Tensor, SymmetricTensor}} = fill(zero(T), (dims)) -@inline Base.ones(::Type{T}, dims::Int...) where {T <: Union{Tensor, SymmetricTensor}} = fill(one(T), (dims)) - -# diagm -@generated function LinearAlgebra.diagm(S::Type{T}, v::Union{AbstractVector, Tuple}) where {T <: SecondOrderTensor} - TensorType = get_base(get_type(S)) - ET = eltype(get_type(S)) == Any ? eltype(v) : eltype(get_type(S)) # lol - f = (i,j) -> i == j ? :($ET(v[$i])) : :(o) - exp = tensor_create(TensorType, f) - return quote - $(Expr(:meta, :inline)) - o = zero($ET) - @inbounds return $TensorType($exp) - end -end -@inline LinearAlgebra.diagm(::Type{Tensor{2, dim}}, v::T) where {dim, T<:Number} = v * one(Tensor{2, dim, T}) -@inline LinearAlgebra.diagm(::Type{SymmetricTensor{2, dim}}, v::T) where {dim, T<:Number} = v * one(SymmetricTensor{2, dim, T}) - -""" - basevec(::Type{Vec{dim, T}}) - basevec(::Type{Vec{dim, T}}, i) - basevec(::Vec{dim, T}) - basevec(::Vec{dim, T}, i) - -Return a tuple with the base vectors corresponding to the dimension `dim` and type -`T`. An optional integer `i` can be used to extract the i:th base vector. -The alias `eᵢ` can also be used, written `e\\_i`. 
- -# Examples -```jldoctest -julia> eᵢ(Vec{2, Float64}) -([1.0, 0.0], [0.0, 1.0]) - -julia> eᵢ(Vec{2, Float64}, 2) -2-element Vec{2, Float64}: - 0.0 - 1.0 -``` -""" -@inline function basevec(::Type{Vec{1, T}}) where {T} - o = one(T) - return (Vec{1, T}((o,)), ) -end -@inline function basevec(::Type{Vec{2, T}}) where {T} - o = one(T) - z = zero(T) - return (Vec{2, T}((o, z)), - Vec{2, T}((z, o))) -end -@inline function basevec(::Type{Vec{3, T}}) where {T} - o = one(T) - z = zero(T) - return (Vec{3, T}((o, z, z)), - Vec{3, T}((z, o, z)), - Vec{3, T}((z, z, o))) -end - -@inline basevec(::Type{Vec{dim}}) where {dim} = basevec(Vec{dim, Float64}) -@inline basevec(::Type{Vec{dim, T}}, i::Int) where {dim, T} = basevec(Vec{dim, T})[i] -@inline basevec(::Type{Vec{dim}}, i::Int) where {dim} = basevec(Vec{dim, Float64})[i] -@inline basevec(v::Vec{dim, T}) where {dim, T} = basevec(typeof(v)) -@inline basevec(v::Vec{dim, T}, i::Int) where {dim, T} = basevec(typeof(v), i) - -const eᵢ = basevec +# Type constructors e.g. Tensor{2, 3}(arg) + +# Tensor from function +@generated function (S::Union{Type{Tensor{order, dim}}, Type{SymmetricTensor{order, dim}}})(f::Function) where {order, dim} + TensorType = get_base(get_type(S)) + if order == 1 + exp = tensor_create(TensorType, (i) -> :(f($i))) + elseif order == 2 + exp = tensor_create(TensorType, (i,j) -> :(f($i, $j))) + elseif order == 3 + exp = tensor_create(TensorType, (i,j,k) -> :(f($i, $j, $k))) + elseif order == 4 + exp = tensor_create(TensorType, (i,j,k,l) -> :(f($i, $j, $k, $l))) + end + quote + $(Expr(:meta, :inline)) + @inbounds return $TensorType($exp) + end +end + +# Applies the function f to all indices f(1), f(2), ... 
f(n_independent_components) +@generated function apply_all(S::Union{Type{Tensor{order, dim}}, Type{SymmetricTensor{order, dim}}}, f::Function) where {order, dim} + TensorType = get_base(get_type(S)) + exp = tensor_create_linear(TensorType, (i) -> :(f($i))) + quote + $(Expr(:meta, :inline)) + @inbounds return $TensorType($exp) + end +end + +@inline function apply_all(S::Union{Tensor{order, dim}, SymmetricTensor{order, dim}}, f::Function) where {order, dim} + apply_all(get_base(typeof(S)), f) +end + +# Tensor from AbstractArray +function Tensor{order, dim}(data::AbstractArray) where {order, dim} + N = n_components(Tensor{order, dim}) + length(data) != n_components(Tensor{order, dim}) && throw(ArgumentError("wrong number of elements, expected $N, got $(length(data))")) + return apply_all(Tensor{order, dim}, @inline function(i) @inbounds data[i]; end) +end + + +# SymmetricTensor from AbstractArray +@generated function SymmetricTensor{order, dim}(data::AbstractArray) where {order, dim} + N = n_components(Tensor{order,dim}) + expN = Expr(:tuple, [:(data[$i]) for i in 1:N]...) + M = n_components(SymmetricTensor{order,dim}) + expM = Expr(:tuple, [:(data[$i]) for i in 1:M]...) 
+ return quote + L = length(data) + if L != $N && L != $M + throw(ArgumentError("wrong number of vector elements, expected $($N) or $($M), got $L")) + end + if L == $M + @inbounds return SymmetricTensor{order, dim}($expM) + end + @inbounds S = Tensor{order, dim}($expN) + return convert(SymmetricTensor{order, dim}, S) + end +end + +# one (identity tensor) +for TensorType in (SymmetricTensor, Tensor) + @eval begin + @inline Base.one(::Type{$(TensorType){order, dim}}) where {order, dim} = one($TensorType{order, dim, Float64}) + @inline Base.one(::Type{$(TensorType){order, dim, T, M}}) where {order, dim, T, M} = one($TensorType{order, dim, T}) + @inline Base.one(::$TensorType{order, dim, T}) where {order, dim, T} = one($TensorType{order, dim, T}) + + @generated function Base.one(S::Type{$(TensorType){order, dim, T}}) where {order, dim, T} + !(order in (2,4)) && throw(ArgumentError("`one` only defined for order 2 and 4")) + δ = (i,j) -> i == j ? :(o) : :(z) + ReturnTensor = get_base(get_type(S)) + if order == 2 + f = (i,j) -> :($(δ(i,j))) + elseif order == 4 && $TensorType == Tensor + f = (i,j,k,l) -> :($(δ(i,k)) * $(δ(j,l))) + else # order == 4 && TensorType == SymmetricTensor + f = (i,j,k,l) -> :(($(δ(i,k)) * $(δ(j,l)) + $(δ(i,l))* $(δ(j,k))) / 2) + end + exp = tensor_create(ReturnTensor, f) + return quote + $(Expr(:meta, :inline)) + o = one(T) + z = zero(o) # zero-no-unit(T) + $ReturnTensor($exp) + end + end + end +end + +# zero, one, rand +for (op, el) in ((:zero, :(zero(T))), (:ones, :(one(T))), (:rand, :(()->rand(T))), (:randn,:(()->randn(T)))) +for TensorType in (SymmetricTensor, Tensor) + @eval begin + @inline Base.$op(::Type{$TensorType{order, dim}}) where {order, dim} = $op($TensorType{order, dim, Float64}) + @inline Base.$op(::Type{$TensorType{order, dim, T, N}}) where {order, dim, T, N} = $op($TensorType{order, dim, T}) + @inline Base.$op(::Type{$TensorType{order, dim, T}}) where {order, dim, T} = fill($el, $TensorType{order, dim}) + end +end +@eval @inline 
Base.$op(S::Type{Vec{dim}}) where {dim} = $op(Vec{dim, Float64}) +@eval @inline Base.$op(t::AllTensors) = $op(typeof(t)) +end + +@inline Base.fill(el::Number, S::Type{T}) where {T <: Union{Tensor, SymmetricTensor}} = apply_all(get_base(T), i -> el) +@inline Base.fill(f::Function, S::Type{T}) where {T <: Union{Tensor, SymmetricTensor}} = apply_all(get_base(T), i -> f()) + +# Array with zero/ones +@inline Base.zeros(::Type{T}, dims::Int...) where {T <: Union{Tensor, SymmetricTensor}} = fill(zero(T), (dims)) +@inline Base.ones(::Type{T}, dims::Int...) where {T <: Union{Tensor, SymmetricTensor}} = fill(one(T), (dims)) + +# diagm +@generated function LinearAlgebra.diagm(S::Type{T}, v::Union{AbstractVector, Tuple}) where {T <: SecondOrderTensor} + TensorType = get_base(get_type(S)) + ET = eltype(get_type(S)) == Any ? eltype(v) : eltype(get_type(S)) # lol + f = (i,j) -> i == j ? :($ET(v[$i])) : :(o) + exp = tensor_create(TensorType, f) + return quote + $(Expr(:meta, :inline)) + o = zero($ET) + @inbounds return $TensorType($exp) + end +end +@inline LinearAlgebra.diagm(::Type{Tensor{2, dim}}, v::T) where {dim, T<:Number} = v * one(Tensor{2, dim, T}) +@inline LinearAlgebra.diagm(::Type{SymmetricTensor{2, dim}}, v::T) where {dim, T<:Number} = v * one(SymmetricTensor{2, dim, T}) + +""" + basevec(::Type{Vec{dim, T}}) + basevec(::Type{Vec{dim, T}}, i) + basevec(::Vec{dim, T}) + basevec(::Vec{dim, T}, i) + +Return a tuple with the base vectors corresponding to the dimension `dim` and type +`T`. An optional integer `i` can be used to extract the i:th base vector. +The alias `eᵢ` can also be used, written `e\\_i`. 
+ +# Examples +```jldoctest +julia> eᵢ(Vec{2, Float64}) +([1.0, 0.0], [0.0, 1.0]) + +julia> eᵢ(Vec{2, Float64}, 2) +2-element Vec{2, Float64}: + 0.0 + 1.0 +``` +""" +@inline function basevec(::Type{Vec{1, T}}) where {T} + o = one(T) + return (Vec{1, T}((o,)), ) +end +@inline function basevec(::Type{Vec{2, T}}) where {T} + o = one(T) + z = zero(T) + return (Vec{2, T}((o, z)), + Vec{2, T}((z, o))) +end +@inline function basevec(::Type{Vec{3, T}}) where {T} + o = one(T) + z = zero(T) + return (Vec{3, T}((o, z, z)), + Vec{3, T}((z, o, z)), + Vec{3, T}((z, z, o))) +end + +@inline basevec(::Type{Vec{dim}}) where {dim} = basevec(Vec{dim, Float64}) +@inline basevec(::Type{Vec{dim, T}}, i::Int) where {dim, T} = basevec(Vec{dim, T})[i] +@inline basevec(::Type{Vec{dim}}, i::Int) where {dim} = basevec(Vec{dim, Float64})[i] +@inline basevec(v::Vec{dim, T}) where {dim, T} = basevec(typeof(v)) +@inline basevec(v::Vec{dim, T}, i::Int) where {dim, T} = basevec(typeof(v), i) + +const eᵢ = basevec diff --git a/src/eigen.jl b/src/eigen.jl index 5ef869a2..7b52cb77 100644 --- a/src/eigen.jl +++ b/src/eigen.jl @@ -1,105 +1,105 @@ -# Specify conversion to static arrays for 2nd order symmetric tensors -to_smatrix(a::SymmetricTensor{2, dim, T}) where {dim, T} = Symmetric(SMatrix{dim, dim, T}(a)) - - -""" - eigvals(::SymmetricTensor) - -Compute the eigenvalues of a symmetric tensor. -""" -@inline LinearAlgebra.eigvals(S::SymmetricTensor{4}) = eigvals(eigen(S)) -@inline LinearAlgebra.eigvals(S::SymmetricTensor{2,dim,T}) where{dim,T} = convert(Vec{dim,T}, Vec{dim}(eigvals(to_smatrix(ustrip(S))))) - -""" - eigvecs(::SymmetricTensor) - -Compute the eigenvectors of a symmetric tensor. 
-""" -@inline LinearAlgebra.eigvecs(S::SymmetricTensor) = eigvecs(eigen(S)) - -struct Eigen{T, S, dim, M} - values::Vec{dim, T} - vectors::Tensor{2, dim, S, M} -end - -struct FourthOrderEigen{dim,T,S,M} - values::Vector{T} - vectors::Vector{SymmetricTensor{2,dim,S,M}} -end - -# destructure via iteration -function Base.iterate(E::Union{Eigen,FourthOrderEigen}, state::Int=1) - return iterate((eigvals(E), eigvecs(E)), state) -end - -""" - eigen(A::SymmetricTensor{2}) - -Compute the eigenvalues and eigenvectors of a symmetric second order tensor -and return an `Eigen` object. The eigenvalues are stored in a `Vec`, -sorted in ascending order. The corresponding eigenvectors are stored -as the columns of a `Tensor`. - -See [`eigvals`](@ref) and [`eigvecs`](@ref). - -# Examples -```jldoctest -julia> A = rand(SymmetricTensor{2, 2}); - -julia> E = eigen(A); - -julia> E.values -2-element Vec{2, Float64}: - -0.1883547111127678 - 1.345436766284664 - -julia> E.vectors -2×2 Tensor{2, 2, Float64, 4}: - -0.701412 0.712756 - 0.712756 0.701412 -``` -""" -LinearAlgebra.eigen(::SymmetricTensor{2}) - -""" - eigvals(::Union{Eigen,FourthOrderEigen}) - -Extract eigenvalues from an `Eigen` or `FourthOrderEigen` object, -returned by [`eigen`](@ref). -""" -@inline LinearAlgebra.eigvals(E::Union{Eigen,FourthOrderEigen}) = E.values -""" - eigvecs(::Union{Eigen,FourthOrderEigen}) - -Extract eigenvectors from an `Eigen` or `FourthOrderEigen` object, -returned by [`eigen`](@ref). -""" -@inline LinearAlgebra.eigvecs(E::Union{Eigen,FourthOrderEigen}) = E.vectors - -""" - eigen(A::SymmetricTensor{4}) - -Compute the eigenvalues and second order eigentensors of a symmetric fourth -order tensor and return an `FourthOrderEigen` object. The eigenvalues and -eigentensors are sorted in ascending order of the eigenvalues. - -See also [`eigvals`](@ref) and [`eigvecs`](@ref). 
-""" -function LinearAlgebra.eigen(R::SymmetricTensor{4,dim,T′}) where {dim,T′} - S = ustrip(R) - T = eltype(S) - E = eigen(Hermitian(tomandel(S))) - values = E.values isa Vector{T′} ? E.values : T′[T′(v) for v in E.values] - vectors = [frommandel(SymmetricTensor{2,dim,T}, view(E.vectors, :, i)) for i in 1:size(E.vectors, 2)] - return FourthOrderEigen(values, vectors) -end - -# Use specialized for 1d as this can be inlined -@inline function LinearAlgebra.eigen(S::SymmetricTensor{2, 1, T}) where {T} - @inbounds Eigen(Vec{1, T}((S[1, 1],)), one(Tensor{2, 1, T})) -end - -function LinearAlgebra.eigen(R::SymmetricTensor{2,dim,T}) where{dim,T} - e = eigen(to_smatrix(ustrip(R))) - return Tensors.Eigen(convert(Vec{dim,T}, Vec{dim}(e.values)), Tensor{2,dim}(e.vectors)) +# Specify conversion to static arrays for 2nd order symmetric tensors +to_smatrix(a::SymmetricTensor{2, dim, T}) where {dim, T} = Symmetric(SMatrix{dim, dim, T}(a)) + + +""" + eigvals(::SymmetricTensor) + +Compute the eigenvalues of a symmetric tensor. +""" +@inline LinearAlgebra.eigvals(S::SymmetricTensor{4}) = eigvals(eigen(S)) +@inline LinearAlgebra.eigvals(S::SymmetricTensor{2,dim,T}) where{dim,T} = convert(Vec{dim,T}, Vec{dim}(eigvals(to_smatrix(ustrip(S))))) + +""" + eigvecs(::SymmetricTensor) + +Compute the eigenvectors of a symmetric tensor. +""" +@inline LinearAlgebra.eigvecs(S::SymmetricTensor) = eigvecs(eigen(S)) + +struct Eigen{T, S, dim, M} + values::Vec{dim, T} + vectors::Tensor{2, dim, S, M} +end + +struct FourthOrderEigen{dim,T,S,M} + values::Vector{T} + vectors::Vector{SymmetricTensor{2,dim,S,M}} +end + +# destructure via iteration +function Base.iterate(E::Union{Eigen,FourthOrderEigen}, state::Int=1) + return iterate((eigvals(E), eigvecs(E)), state) +end + +""" + eigen(A::SymmetricTensor{2}) + +Compute the eigenvalues and eigenvectors of a symmetric second order tensor +and return an `Eigen` object. The eigenvalues are stored in a `Vec`, +sorted in ascending order. 
The corresponding eigenvectors are stored +as the columns of a `Tensor`. + +See [`eigvals`](@ref) and [`eigvecs`](@ref). + +# Examples +```jldoctest +julia> A = rand(SymmetricTensor{2, 2}); + +julia> E = eigen(A); + +julia> E.values +2-element Vec{2, Float64}: + -0.1883547111127678 + 1.345436766284664 + +julia> E.vectors +2×2 Tensor{2, 2, Float64, 4}: + -0.701412 0.712756 + 0.712756 0.701412 +``` +""" +LinearAlgebra.eigen(::SymmetricTensor{2}) + +""" + eigvals(::Union{Eigen,FourthOrderEigen}) + +Extract eigenvalues from an `Eigen` or `FourthOrderEigen` object, +returned by [`eigen`](@ref). +""" +@inline LinearAlgebra.eigvals(E::Union{Eigen,FourthOrderEigen}) = E.values +""" + eigvecs(::Union{Eigen,FourthOrderEigen}) + +Extract eigenvectors from an `Eigen` or `FourthOrderEigen` object, +returned by [`eigen`](@ref). +""" +@inline LinearAlgebra.eigvecs(E::Union{Eigen,FourthOrderEigen}) = E.vectors + +""" + eigen(A::SymmetricTensor{4}) + +Compute the eigenvalues and second order eigentensors of a symmetric fourth +order tensor and return an `FourthOrderEigen` object. The eigenvalues and +eigentensors are sorted in ascending order of the eigenvalues. + +See also [`eigvals`](@ref) and [`eigvecs`](@ref). +""" +function LinearAlgebra.eigen(R::SymmetricTensor{4,dim,T′}) where {dim,T′} + S = ustrip(R) + T = eltype(S) + E = eigen(Hermitian(tomandel(S))) + values = E.values isa Vector{T′} ? 
E.values : T′[T′(v) for v in E.values] + vectors = [frommandel(SymmetricTensor{2,dim,T}, view(E.vectors, :, i)) for i in 1:size(E.vectors, 2)] + return FourthOrderEigen(values, vectors) +end + +# Use specialized for 1d as this can be inlined +@inline function LinearAlgebra.eigen(S::SymmetricTensor{2, 1, T}) where {T} + @inbounds Eigen(Vec{1, T}((S[1, 1],)), one(Tensor{2, 1, T})) +end + +function LinearAlgebra.eigen(R::SymmetricTensor{2,dim,T}) where{dim,T} + e = eigen(to_smatrix(ustrip(R))) + return Tensors.Eigen(convert(Vec{dim,T}, Vec{dim}(e.values)), Tensor{2,dim}(e.vectors)) end \ No newline at end of file diff --git a/src/indexing.jl b/src/indexing.jl index 5517b588..c9ef5033 100644 --- a/src/indexing.jl +++ b/src/indexing.jl @@ -1,101 +1,104 @@ -############ -# Indexing # -############ - -@inline function compute_index(::Type{SymmetricTensor{2, dim}}, i::Int, j::Int) where {dim} - if i < j - i, j = j, i - end - # We are skipping triangle over the diagonal = (j-1) * j / 2 indices - skipped_indicies = div((j-1) * j, 2) - return dim*(j-1) + i - skipped_indicies -end - -@inline function compute_index(::Type{Tensor{2, dim}}, i::Int, j::Int) where {dim} - return dim*(j-1) + i -end - -@inline function compute_index(::Type{Tensor{3, dim}}, i::Int, j::Int, k::Int) where {dim} - lower_order = Tensor{2, dim} - I = compute_index(lower_order, i, j) - n = n_components(lower_order) - return (k-1) * n + I -end - -@inline function compute_index(::Type{Tensor{4, dim}}, i::Int, j::Int, k::Int, l::Int) where {dim} - lower_order = Tensor{2, dim} - I = compute_index(lower_order, i, j) - J = compute_index(lower_order, k, l) - n = n_components(lower_order) - return (J-1) * n + I -end - -@inline function compute_index(::Type{SymmetricTensor{4, dim}}, i::Int, j::Int, k::Int, l::Int) where {dim} - lower_order = SymmetricTensor{2, dim} - I = compute_index(lower_order, i, j) - J = compute_index(lower_order, k, l) - n = n_components(lower_order) - return (J-1) * n + I -end - -# indexed 
with [order][dim] -const SYMMETRIC_INDICES = ((), ([1,], [1, 2, 4], [1, 2, 3, 5, 6, 9]), (), - ([1,], [1, 2, 4, 5, 6, 8, 13, 14, 16], [ 1, 2, - 3, 5, 6, 9, 10, 11, 12, 14, 15, 18, 19, 20, - 21, 23, 24, 27, 37, 38, 39, 41, 42, 45, 46, 47, - 48, 50, 51, 54, 73, 74, 75, 77, 78, 81])) - -########################### -# getindex general tensor # -########################### -@inline function Base.getindex(S::Tensor, i::Int) - @boundscheck checkbounds(S, i) - @inbounds v = get_data(S)[i] - return v -end - -@inline function Base.getindex(S::SymmetricTensor{2, dim}, i::Int, j::Int) where {dim} - @boundscheck checkbounds(S, i, j) - @inbounds v = get_data(S)[compute_index(SymmetricTensor{2, dim}, i, j)] - return v -end - -@inline function Base.getindex(S::SymmetricTensor{4, dim}, i::Int, j::Int, k::Int, l::Int) where {dim} - @boundscheck checkbounds(S, i, j, k, l) - @inbounds v = get_data(S)[compute_index(SymmetricTensor{4, dim}, i, j, k, l)] - return v -end - -# Slice -@inline Base.getindex(v::Vec, ::Colon) = v - -function Base.getindex(S::Union{SecondOrderTensor, Tensor{3}, FourthOrderTensor}, ::Colon) - throw(ArgumentError("S[:] not defined for S of order 2, 3, or 4, use Array(S) to convert to an Array")) -end - -@inline @generated function Base.getindex(S::SecondOrderTensor{dim}, ::Colon, j::Int) where {dim} - idx2(i,j) = compute_index(get_base(S), i, j) - ex1 = Expr(:tuple, [:(get_data(S)[$(idx2(i,1))]) for i in 1:dim]...) - ex2 = Expr(:tuple, [:(get_data(S)[$(idx2(i,2))]) for i in 1:dim]...) - ex3 = Expr(:tuple, [:(get_data(S)[$(idx2(i,3))]) for i in 1:dim]...) - return quote - @boundscheck checkbounds(S,Colon(),j) - if j == 1 return Vec{dim}($ex1) - elseif j == 2 return Vec{dim}($ex2) - else return Vec{dim}($ex3) - end - end -end -@inline @generated function Base.getindex(S::SecondOrderTensor{dim}, i::Int, ::Colon) where {dim} - idx2(i,j) = compute_index(get_base(S), i, j) - ex1 = Expr(:tuple, [:(get_data(S)[$(idx2(1,j))]) for j in 1:dim]...) 
- ex2 = Expr(:tuple, [:(get_data(S)[$(idx2(2,j))]) for j in 1:dim]...) - ex3 = Expr(:tuple, [:(get_data(S)[$(idx2(3,j))]) for j in 1:dim]...) - return quote - @boundscheck checkbounds(S,i,Colon()) - if i == 1 return Vec{dim}($ex1) - elseif i == 2 return Vec{dim}($ex2) - else return Vec{dim}($ex3) - end - end -end +############ +# Indexing # +############ +@inline function compute_index(::Type{Tensor{1, dim}}, i::Int) where dim + return i +end + +@inline function compute_index(::Type{SymmetricTensor{2, dim}}, i::Int, j::Int) where {dim} + if i < j + i, j = j, i + end + # We are skipping triangle over the diagonal = (j-1) * j / 2 indices + skipped_indicies = div((j-1) * j, 2) + return dim*(j-1) + i - skipped_indicies +end + +@inline function compute_index(::Type{Tensor{2, dim}}, i::Int, j::Int) where {dim} + return dim*(j-1) + i +end + +@inline function compute_index(::Type{Tensor{3, dim}}, i::Int, j::Int, k::Int) where {dim} + lower_order = Tensor{2, dim} + I = compute_index(lower_order, i, j) + n = n_components(lower_order) + return (k-1) * n + I +end + +@inline function compute_index(::Type{Tensor{4, dim}}, i::Int, j::Int, k::Int, l::Int) where {dim} + lower_order = Tensor{2, dim} + I = compute_index(lower_order, i, j) + J = compute_index(lower_order, k, l) + n = n_components(lower_order) + return (J-1) * n + I +end + +@inline function compute_index(::Type{SymmetricTensor{4, dim}}, i::Int, j::Int, k::Int, l::Int) where {dim} + lower_order = SymmetricTensor{2, dim} + I = compute_index(lower_order, i, j) + J = compute_index(lower_order, k, l) + n = n_components(lower_order) + return (J-1) * n + I +end + +# indexed with [order][dim] +const SYMMETRIC_INDICES = ((), ([1,], [1, 2, 4], [1, 2, 3, 5, 6, 9]), (), + ([1,], [1, 2, 4, 5, 6, 8, 13, 14, 16], [ 1, 2, + 3, 5, 6, 9, 10, 11, 12, 14, 15, 18, 19, 20, + 21, 23, 24, 27, 37, 38, 39, 41, 42, 45, 46, 47, + 48, 50, 51, 54, 73, 74, 75, 77, 78, 81])) + +########################### +# getindex general tensor # 
+########################### +@inline function Base.getindex(S::Tensor, i::Int) + @boundscheck checkbounds(S, i) + @inbounds v = get_data(S)[i] + return v +end + +@inline function Base.getindex(S::SymmetricTensor{2, dim}, i::Int, j::Int) where {dim} + @boundscheck checkbounds(S, i, j) + @inbounds v = get_data(S)[compute_index(SymmetricTensor{2, dim}, i, j)] + return v +end + +@inline function Base.getindex(S::SymmetricTensor{4, dim}, i::Int, j::Int, k::Int, l::Int) where {dim} + @boundscheck checkbounds(S, i, j, k, l) + @inbounds v = get_data(S)[compute_index(SymmetricTensor{4, dim}, i, j, k, l)] + return v +end + +# Slice +@inline Base.getindex(v::Vec, ::Colon) = v + +function Base.getindex(S::Union{SecondOrderTensor, Tensor{3}, FourthOrderTensor}, ::Colon) + throw(ArgumentError("S[:] not defined for S of order 2, 3, or 4, use Array(S) to convert to an Array")) +end + +@inline @generated function Base.getindex(S::SecondOrderTensor{dim}, ::Colon, j::Int) where {dim} + idx2(i,j) = compute_index(get_base(S), i, j) + ex1 = Expr(:tuple, [:(get_data(S)[$(idx2(i,1))]) for i in 1:dim]...) + ex2 = Expr(:tuple, [:(get_data(S)[$(idx2(i,2))]) for i in 1:dim]...) + ex3 = Expr(:tuple, [:(get_data(S)[$(idx2(i,3))]) for i in 1:dim]...) + return quote + @boundscheck checkbounds(S,Colon(),j) + if j == 1 return Vec{dim}($ex1) + elseif j == 2 return Vec{dim}($ex2) + else return Vec{dim}($ex3) + end + end +end +@inline @generated function Base.getindex(S::SecondOrderTensor{dim}, i::Int, ::Colon) where {dim} + idx2(i,j) = compute_index(get_base(S), i, j) + ex1 = Expr(:tuple, [:(get_data(S)[$(idx2(1,j))]) for j in 1:dim]...) + ex2 = Expr(:tuple, [:(get_data(S)[$(idx2(2,j))]) for j in 1:dim]...) + ex3 = Expr(:tuple, [:(get_data(S)[$(idx2(3,j))]) for j in 1:dim]...) 
+ return quote + @boundscheck checkbounds(S,i,Colon()) + if i == 1 return Vec{dim}($ex1) + elseif i == 2 return Vec{dim}($ex2) + else return Vec{dim}($ex3) + end + end +end diff --git a/src/math_ops.jl b/src/math_ops.jl index 3b8c5bbb..204954cb 100644 --- a/src/math_ops.jl +++ b/src/math_ops.jl @@ -1,429 +1,429 @@ -# norm, det, inv, eig, trace, dev -""" - norm(::Vec) - norm(::SecondOrderTensor) - norm(::FourthOrderTensor) - -Computes the norm of a tensor. - -# Examples -```jldoctest -julia> A = rand(Tensor{2,3}) -3×3 Tensor{2, 3, Float64, 9}: - 0.590845 0.460085 0.200586 - 0.766797 0.794026 0.298614 - 0.566237 0.854147 0.246837 - -julia> norm(A) -1.7377443667834922 -``` -""" -@inline LinearAlgebra.norm(v::Vec) = sqrt(dot(v, v)) -@inline LinearAlgebra.norm(S::SecondOrderTensor) = sqrt(dcontract(S, S)) - -@generated function LinearAlgebra.norm(S::Tensor{3,dim}) where {dim} - idx(i,j,k) = compute_index(get_base(S), i, j, k) - ex = Expr[] - for k in 1:dim, j in 1:dim, i in 1:dim - push!(ex, :(get_data(S)[$(idx(i,j,k))])) - end - exp = reducer(ex, ex) - return quote - $(Expr(:meta, :inline)) - @inbounds return sqrt($exp) - end -end - -# special case for Tensor{4, 3} since it is faster than unrolling -@inline LinearAlgebra.norm(S::Tensor{4, 3}) = sqrt(mapreduce(abs2, +, S)) - -@generated function LinearAlgebra.norm(S::FourthOrderTensor{dim}) where {dim} - idx(i,j,k,l) = compute_index(get_base(S), i, j, k, l) - ex = Expr[] - for l in 1:dim, k in 1:dim, j in 1:dim, i in 1:dim - push!(ex, :(get_data(S)[$(idx(i,j,k,l))])) - end - exp = reducer(ex, ex) - return quote - $(Expr(:meta, :inline)) - @inbounds return sqrt($exp) - end -end - -LinearAlgebra.normalize(t::AbstractTensor) = t/norm(t) - -""" - det(::SecondOrderTensor) - -Computes the determinant of a second order tensor. 
- -# Examples -```jldoctest -julia> A = rand(SymmetricTensor{2,3}) -3×3 SymmetricTensor{2, 3, Float64, 6}: - 0.590845 0.766797 0.566237 - 0.766797 0.460085 0.794026 - 0.566237 0.794026 0.854147 - -julia> det(A) --0.1005427219925894 -``` -""" -@inline LinearAlgebra.det(t::SecondOrderTensor{1}) = @inbounds t[1,1] -@inline LinearAlgebra.det(t::SecondOrderTensor{2}) = @inbounds (t[1,1] * t[2,2] - t[1,2] * t[2,1]) -@inline function LinearAlgebra.det(t::SecondOrderTensor{3}) - @inbounds (t[1,1] * (t[2,2]*t[3,3] - t[2,3]*t[3,2]) - - t[1,2] * (t[2,1]*t[3,3] - t[2,3]*t[3,1]) + - t[1,3] * (t[2,1]*t[3,2] - t[2,2]*t[3,1])) -end - -""" - inv(::SecondOrderTensor) - -Computes the inverse of a second order tensor. - -# Examples -```jldoctest -julia> A = rand(Tensor{2,3}) -3×3 Tensor{2, 3, Float64, 9}: - 0.590845 0.460085 0.200586 - 0.766797 0.794026 0.298614 - 0.566237 0.854147 0.246837 - -julia> inv(A) -3×3 Tensor{2, 3, Float64, 9}: - 19.7146 -19.2802 7.30384 - 6.73809 -10.7687 7.55198 - -68.541 81.4917 -38.8361 -``` -""" -@generated function Base.inv(t::Tensor{2, dim}) where {dim} - Tt = get_base(t) - idx(i,j) = compute_index(Tt, i, j) - if dim == 1 - ex = :($Tt((dinv, ))) - elseif dim == 2 - ex = quote - v = get_data(t) - $Tt((v[$(idx(2,2))] * dinv, -v[$(idx(2,1))] * dinv, - -v[$(idx(1,2))] * dinv, v[$(idx(1,1))] * dinv)) - end - else # dim == 3 - ex = quote - v = get_data(t) - $Tt(((v[$(idx(2,2))]*v[$(idx(3,3))] - v[$(idx(2,3))]*v[$(idx(3,2))]) * dinv, - -(v[$(idx(2,1))]*v[$(idx(3,3))] - v[$(idx(2,3))]*v[$(idx(3,1))]) * dinv, - (v[$(idx(2,1))]*v[$(idx(3,2))] - v[$(idx(2,2))]*v[$(idx(3,1))]) * dinv, - - -(v[$(idx(1,2))]*v[$(idx(3,3))] - v[$(idx(1,3))]*v[$(idx(3,2))]) * dinv, - (v[$(idx(1,1))]*v[$(idx(3,3))] - v[$(idx(1,3))]*v[$(idx(3,1))]) * dinv, - -(v[$(idx(1,1))]*v[$(idx(3,2))] - v[$(idx(1,2))]*v[$(idx(3,1))]) * dinv, - - (v[$(idx(1,2))]*v[$(idx(2,3))] - v[$(idx(1,3))]*v[$(idx(2,2))]) * dinv, - -(v[$(idx(1,1))]*v[$(idx(2,3))] - v[$(idx(1,3))]*v[$(idx(2,1))]) * dinv, - 
(v[$(idx(1,1))]*v[$(idx(2,2))] - v[$(idx(1,2))]*v[$(idx(2,1))]) * dinv)) - end - end - return quote - $(Expr(:meta, :inline)) - dinv = 1 / det(t) - @inbounds return $ex - end -end - -@generated function Base.inv(t::SymmetricTensor{2, dim}) where {dim} - Tt = get_base(t) - idx(i,j) = compute_index(Tt, i, j) - if dim == 1 - ex = :($Tt((dinv, ))) - elseif dim == 2 - ex = quote - v = get_data(t) - $Tt((v[$(idx(2,2))] * dinv, -v[$(idx(2,1))] * dinv, - v[$(idx(1,1))] * dinv)) - end - else # dim == 3 - ex = quote - v = get_data(t) - $Tt(((v[$(idx(2,2))]*v[$(idx(3,3))] - v[$(idx(2,3))]*v[$(idx(3,2))]) * dinv, - -(v[$(idx(2,1))]*v[$(idx(3,3))] - v[$(idx(2,3))]*v[$(idx(3,1))]) * dinv, - (v[$(idx(2,1))]*v[$(idx(3,2))] - v[$(idx(2,2))]*v[$(idx(3,1))]) * dinv, - - (v[$(idx(1,1))]*v[$(idx(3,3))] - v[$(idx(1,3))]*v[$(idx(3,1))]) * dinv, - -(v[$(idx(1,1))]*v[$(idx(3,2))] - v[$(idx(1,2))]*v[$(idx(3,1))]) * dinv, - - (v[$(idx(1,1))]*v[$(idx(2,2))] - v[$(idx(1,2))]*v[$(idx(2,1))]) * dinv)) - end - end - return quote - $(Expr(:meta, :inline)) - dinv = 1 / det(t) - @inbounds return $ex - end -end - -function Base.inv(t::Tensor{4, dim}) where {dim} - fromvoigt(Tensor{4, dim}, inv(tovoigt(t))) -end - -function Base.inv(t::SymmetricTensor{4, dim, T}) where {dim, T} - frommandel(SymmetricTensor{4, dim}, inv(tomandel(t))) -end - -function Base.inv(t::Tensor{4, dim, <:Real}) where {dim} - fromvoigt(Tensor{4, dim}, inv(tovoigt(SMatrix, t))) -end - -function Base.inv(t::SymmetricTensor{4, dim, T}) where {dim, T<:Real} - frommandel(SymmetricTensor{4, dim}, inv(tomandel(SMatrix, t))) -end - - -Base.:\(S1::SecondOrderTensor, S2::AbstractTensor) = inv(S1) ⋅ S2 - -""" - sqrt(S::SymmetricTensor{2}) - -Calculate the square root of the positive definite symmetric -second order tensor `S`, such that `√S ⋅ √S == S`. 
- -# Examples -```jldoctest -julia> S = rand(SymmetricTensor{2,2}); S = tdot(S) -2×2 SymmetricTensor{2, 2, Float64, 3}: - 0.937075 0.887247 - 0.887247 0.908603 - -julia> sqrt(S) -2×2 SymmetricTensor{2, 2, Float64, 3}: - 0.776178 0.578467 - 0.578467 0.757614 - -julia> √S ⋅ √S ≈ S -true -``` -""" -Base.sqrt(::SymmetricTensor{2}) - -Base.sqrt(S::SymmetricTensor{2,1}) = SymmetricTensor{2,1}((sqrt(S[1,1]),)) - -# https://en.m.wikipedia.org/wiki/Square_root_of_a_2_by_2_matrix -function Base.sqrt(S::SymmetricTensor{2,2}) - s = √(det(S)) - t = √(tr(S)+2s) - return SymmetricTensor{2,2}((S[1,1]+s, S[2,1], S[2,2]+s)) / t -end - -function Base.sqrt(S::SymmetricTensor{2,3,T}) where T - E = eigen(S) - λ = E.values - Φ = E.vectors - z = zero(T) - Λ = Tensor{2,3}((√(λ[1]), z, z, z, √(λ[2]), z, z, z, √(λ[3]))) - return unsafe_symmetric(Φ⋅Λ⋅Φ') -end - -""" - tr(::SecondOrderTensor) - -Computes the trace of a second order tensor. - -# Examples -```jldoctest -julia> A = rand(SymmetricTensor{2,3}) -3×3 SymmetricTensor{2, 3, Float64, 6}: - 0.590845 0.766797 0.566237 - 0.766797 0.460085 0.794026 - 0.566237 0.794026 0.854147 - -julia> tr(A) -1.9050765715072775 -``` -""" -@generated function LinearAlgebra.tr(S::SecondOrderTensor{dim}) where {dim} - idx(i,j) = compute_index(get_base(S), i, j) - ex = Expr[:(get_data(S)[$(idx(i,i))]) for i in 1:dim] - exp = reduce((ex1, ex2) -> :(+($ex1, $ex2)), ex) - @inbounds return exp -end - -Statistics.mean(S::SecondOrderTensor) = tr(S) / 3 - -""" - vol(::SecondOrderTensor) - -Computes the volumetric part of a second order tensor -based on the additive decomposition. 
- -# Examples -```jldoctest -julia> A = rand(SymmetricTensor{2,3}) -3×3 SymmetricTensor{2, 3, Float64, 6}: - 0.590845 0.766797 0.566237 - 0.766797 0.460085 0.794026 - 0.566237 0.794026 0.854147 - -julia> vol(A) -3×3 SymmetricTensor{2, 3, Float64, 6}: - 0.635026 0.0 0.0 - 0.0 0.635026 0.0 - 0.0 0.0 0.635026 - -julia> vol(A) + dev(A) ≈ A -true -``` -""" -vol(S::SecondOrderTensor) = mean(S) * one(S) - -""" - dev(::SecondOrderTensor) - -Computes the deviatoric part of a second order tensor. - -# Examples -```jldoctest -julia> A = rand(Tensor{2, 3}); - -julia> dev(A) -3×3 Tensor{2, 3, Float64, 9}: - 0.0469421 0.460085 0.200586 - 0.766797 0.250123 0.298614 - 0.566237 0.854147 -0.297065 - -julia> tr(dev(A)) -0.0 -``` -""" -@inline function dev(S::SecondOrderTensor) - Tt = get_base(typeof(S)) - trace = tr(S) / 3 - Tt( - @inline function(i, j) - @inbounds v = i == j ? S[i,j] - trace : S[i,j] - v - end - ) -end - -# https://web.archive.org/web/20150311224314/http://inside.mines.edu/fs_home/gmurray/ArbitraryAxisRotation/ -""" - rotate(x::AbstractTensor{3}, u::Vec{3}, θ::Number) - -Rotate a three dimensional tensor `x` around the vector `u` a total of `θ` radians. - -# Examples -```jldoctest -julia> x = Vec{3}((0.0, 0.0, 1.0)); - -julia> u = Vec{3}((0.0, 1.0, 0.0)); - -julia> rotate(x, u, π/2) -3-element Vec{3, Float64}: - 1.0 - 0.0 - 6.123233995736766e-17 -``` -""" -rotate(x::AbstractTensor{<:Any,3}, u::Vec{3}, θ::Number) - -function rotate(x::Vec{3}, u::Vec{3}, θ::Number) - ux = u ⋅ x - u² = u ⋅ u - s, c = sincos(θ) - (u * ux * (1 - c) + u² * x * c + sqrt(u²) * (u × x) * s) / u² -end - -""" - rotate(x::AbstractTensor{2}, θ::Number) - -Rotate a two dimensional tensor `x` `θ` radians around the out-of-plane axis. 
- -# Examples -```jldoctest -julia> x = Vec{2}((0.0, 1.0)); - -julia> rotate(x, π/4) -2-element Vec{2, Float64}: - -0.7071067811865475 - 0.7071067811865476 -``` -""" -rotate(x::AbstractTensor{<:Any, 2}, θ::Number) - -function rotate(x::Vec{2}, θ::Number) - s, c = sincos(θ) - return Vec{2}((c * x[1] - s * x[2], s * x[1] + c * x[2])) -end - -@deprecate rotation_matrix rotation_tensor false - -""" - rotation_tensor(θ::Number) - -Return the two-dimensional rotation matrix corresponding to rotation of `θ` radians around -the out-of-plane axis (i.e. around `(0, 0, 1)`). -""" -function rotation_tensor(θ::Number) - s, c = sincos(θ) - return Tensor{2, 2}((c, s, -s, c)) -end - -""" - rotation_tensor(ψ::Number, θ::Number, ϕ::Number) - -Return the three-dimensional rotation matrix corresponding to the rotation described -by the three Euler angles ``ψ``, ``θ``, ``ϕ``. - -```math -R(ψ,θ,ϕ) = R_x(ψ)R_y(θ)R_z(ϕ) -``` -see e.g. for a complete description. - -Note that the [gimbal lock phenomena](https://en.wikipedia.org/wiki/Gimbal_lock) can occur when using -this rotation tensor parametrization. -""" -function rotation_tensor(ψ::Number,θ::Number,ϕ::Number) - sψ, cψ = sincos(ψ) - sθ, cθ = sincos(θ) - sϕ, cϕ = sincos(ϕ) - return Tensor{2,3}((cθ * cϕ, cθ * sϕ, -sθ, #first column - sψ * sθ * cϕ - cψ * sϕ, sψ * sθ * sϕ + cψ * cϕ, sψ * cθ, #second column - cψ * sθ * cϕ + sψ * sϕ, cψ * sθ * sϕ - sψ * cϕ, cψ * cθ)) #third column -end - -""" - rotation_tensor(u::Vec{3}, θ::Number) - -Return the three-dimensional rotation matrix corresponding to rotation of `θ` radians around -the vector `u`. 
-""" -function rotation_tensor(u::Vec{3, T}, θ::Number) where T - # See http://mathworld.wolfram.com/RodriguesRotationFormula.html - u = u / norm(u) - z = zero(T) - ω = Tensor{2, 3}((z, u[3], -u[2], -u[3], z, u[1], u[2], -u[1], z)) - s, c = sincos(θ) - return one(ω) + s * ω + (1 - c) * ω^2 -end - -# args is (u::Vec{3}, θ::Number) for 3D tensors, and (θ::number,) for 2D -function rotate(x::SymmetricTensor{2}, args...) - R = rotation_tensor(args...) - return unsafe_symmetric(R ⋅ x ⋅ R') -end -function rotate(x::Tensor{2}, args...) - R = rotation_tensor(args...) - return R ⋅ x ⋅ R' -end -function rotate(x::Tensor{3}, args...) - R = rotation_tensor(args...) - return otimesu(R, R) ⊡ x ⋅ R' -end -function rotate(x::Tensor{4}, args...) - R = rotation_tensor(args...) - return otimesu(R, R) ⊡ x ⊡ otimesu(R', R') -end -function rotate(x::SymmetricTensor{4}, args...) - R = rotation_tensor(args...) - return unsafe_symmetric(otimesu(R, R) ⊡ x ⊡ otimesu(R', R')) -end +# norm, det, inv, eig, trace, dev +""" + norm(::Vec) + norm(::SecondOrderTensor) + norm(::FourthOrderTensor) + +Computes the norm of a tensor. 
+ +# Examples +```jldoctest +julia> A = rand(Tensor{2,3}) +3×3 Tensor{2, 3, Float64, 9}: + 0.590845 0.460085 0.200586 + 0.766797 0.794026 0.298614 + 0.566237 0.854147 0.246837 + +julia> norm(A) +1.7377443667834922 +``` +""" +@inline LinearAlgebra.norm(v::Vec) = sqrt(dot(v, v)) +@inline LinearAlgebra.norm(S::SecondOrderTensor) = sqrt(dcontract(S, S)) + +@generated function LinearAlgebra.norm(S::Tensor{3,dim}) where {dim} + idx(i,j,k) = compute_index(get_base(S), i, j, k) + ex = Expr[] + for k in 1:dim, j in 1:dim, i in 1:dim + push!(ex, :(get_data(S)[$(idx(i,j,k))])) + end + exp = reducer(ex, ex) + return quote + $(Expr(:meta, :inline)) + @inbounds return sqrt($exp) + end +end + +# special case for Tensor{4, 3} since it is faster than unrolling +@inline LinearAlgebra.norm(S::Tensor{4, 3}) = sqrt(mapreduce(abs2, +, S)) + +@generated function LinearAlgebra.norm(S::FourthOrderTensor{dim}) where {dim} + idx(i,j,k,l) = compute_index(get_base(S), i, j, k, l) + ex = Expr[] + for l in 1:dim, k in 1:dim, j in 1:dim, i in 1:dim + push!(ex, :(get_data(S)[$(idx(i,j,k,l))])) + end + exp = reducer(ex, ex) + return quote + $(Expr(:meta, :inline)) + @inbounds return sqrt($exp) + end +end + +LinearAlgebra.normalize(t::AbstractTensor) = t/norm(t) + +""" + det(::SecondOrderTensor) + +Computes the determinant of a second order tensor. 
+ +# Examples +```jldoctest +julia> A = rand(SymmetricTensor{2,3}) +3×3 SymmetricTensor{2, 3, Float64, 6}: + 0.590845 0.766797 0.566237 + 0.766797 0.460085 0.794026 + 0.566237 0.794026 0.854147 + +julia> det(A) +-0.1005427219925894 +``` +""" +@inline LinearAlgebra.det(t::SecondOrderTensor{1}) = @inbounds t[1,1] +@inline LinearAlgebra.det(t::SecondOrderTensor{2}) = @inbounds (t[1,1] * t[2,2] - t[1,2] * t[2,1]) +@inline function LinearAlgebra.det(t::SecondOrderTensor{3}) + @inbounds (t[1,1] * (t[2,2]*t[3,3] - t[2,3]*t[3,2]) - + t[1,2] * (t[2,1]*t[3,3] - t[2,3]*t[3,1]) + + t[1,3] * (t[2,1]*t[3,2] - t[2,2]*t[3,1])) +end + +""" + inv(::SecondOrderTensor) + +Computes the inverse of a second order tensor. + +# Examples +```jldoctest +julia> A = rand(Tensor{2,3}) +3×3 Tensor{2, 3, Float64, 9}: + 0.590845 0.460085 0.200586 + 0.766797 0.794026 0.298614 + 0.566237 0.854147 0.246837 + +julia> inv(A) +3×3 Tensor{2, 3, Float64, 9}: + 19.7146 -19.2802 7.30384 + 6.73809 -10.7687 7.55198 + -68.541 81.4917 -38.8361 +``` +""" +@generated function Base.inv(t::Tensor{2, dim}) where {dim} + Tt = get_base(t) + idx(i,j) = compute_index(Tt, i, j) + if dim == 1 + ex = :($Tt((dinv, ))) + elseif dim == 2 + ex = quote + v = get_data(t) + $Tt((v[$(idx(2,2))] * dinv, -v[$(idx(2,1))] * dinv, + -v[$(idx(1,2))] * dinv, v[$(idx(1,1))] * dinv)) + end + else # dim == 3 + ex = quote + v = get_data(t) + $Tt(((v[$(idx(2,2))]*v[$(idx(3,3))] - v[$(idx(2,3))]*v[$(idx(3,2))]) * dinv, + -(v[$(idx(2,1))]*v[$(idx(3,3))] - v[$(idx(2,3))]*v[$(idx(3,1))]) * dinv, + (v[$(idx(2,1))]*v[$(idx(3,2))] - v[$(idx(2,2))]*v[$(idx(3,1))]) * dinv, + + -(v[$(idx(1,2))]*v[$(idx(3,3))] - v[$(idx(1,3))]*v[$(idx(3,2))]) * dinv, + (v[$(idx(1,1))]*v[$(idx(3,3))] - v[$(idx(1,3))]*v[$(idx(3,1))]) * dinv, + -(v[$(idx(1,1))]*v[$(idx(3,2))] - v[$(idx(1,2))]*v[$(idx(3,1))]) * dinv, + + (v[$(idx(1,2))]*v[$(idx(2,3))] - v[$(idx(1,3))]*v[$(idx(2,2))]) * dinv, + -(v[$(idx(1,1))]*v[$(idx(2,3))] - v[$(idx(1,3))]*v[$(idx(2,1))]) * dinv, + 
(v[$(idx(1,1))]*v[$(idx(2,2))] - v[$(idx(1,2))]*v[$(idx(2,1))]) * dinv)) + end + end + return quote + $(Expr(:meta, :inline)) + dinv = 1 / det(t) + @inbounds return $ex + end +end + +@generated function Base.inv(t::SymmetricTensor{2, dim}) where {dim} + Tt = get_base(t) + idx(i,j) = compute_index(Tt, i, j) + if dim == 1 + ex = :($Tt((dinv, ))) + elseif dim == 2 + ex = quote + v = get_data(t) + $Tt((v[$(idx(2,2))] * dinv, -v[$(idx(2,1))] * dinv, + v[$(idx(1,1))] * dinv)) + end + else # dim == 3 + ex = quote + v = get_data(t) + $Tt(((v[$(idx(2,2))]*v[$(idx(3,3))] - v[$(idx(2,3))]*v[$(idx(3,2))]) * dinv, + -(v[$(idx(2,1))]*v[$(idx(3,3))] - v[$(idx(2,3))]*v[$(idx(3,1))]) * dinv, + (v[$(idx(2,1))]*v[$(idx(3,2))] - v[$(idx(2,2))]*v[$(idx(3,1))]) * dinv, + + (v[$(idx(1,1))]*v[$(idx(3,3))] - v[$(idx(1,3))]*v[$(idx(3,1))]) * dinv, + -(v[$(idx(1,1))]*v[$(idx(3,2))] - v[$(idx(1,2))]*v[$(idx(3,1))]) * dinv, + + (v[$(idx(1,1))]*v[$(idx(2,2))] - v[$(idx(1,2))]*v[$(idx(2,1))]) * dinv)) + end + end + return quote + $(Expr(:meta, :inline)) + dinv = 1 / det(t) + @inbounds return $ex + end +end + +function Base.inv(t::Tensor{4, dim}) where {dim} + fromvoigt(Tensor{4, dim}, inv(tovoigt(t))) +end + +function Base.inv(t::SymmetricTensor{4, dim, T}) where {dim, T} + frommandel(SymmetricTensor{4, dim}, inv(tomandel(t))) +end + +function Base.inv(t::Tensor{4, dim, <:Real}) where {dim} + fromvoigt(Tensor{4, dim}, inv(tovoigt(SMatrix, t))) +end + +function Base.inv(t::SymmetricTensor{4, dim, T}) where {dim, T<:Real} + frommandel(SymmetricTensor{4, dim}, inv(tomandel(SMatrix, t))) +end + + +Base.:\(S1::SecondOrderTensor, S2::AbstractTensor) = inv(S1) ⋅ S2 + +""" + sqrt(S::SymmetricTensor{2}) + +Calculate the square root of the positive definite symmetric +second order tensor `S`, such that `√S ⋅ √S == S`. 
+ +# Examples +```jldoctest +julia> S = rand(SymmetricTensor{2,2}); S = tdot(S) +2×2 SymmetricTensor{2, 2, Float64, 3}: + 0.937075 0.887247 + 0.887247 0.908603 + +julia> sqrt(S) +2×2 SymmetricTensor{2, 2, Float64, 3}: + 0.776178 0.578467 + 0.578467 0.757614 + +julia> √S ⋅ √S ≈ S +true +``` +""" +Base.sqrt(::SymmetricTensor{2}) + +Base.sqrt(S::SymmetricTensor{2,1}) = SymmetricTensor{2,1}((sqrt(S[1,1]),)) + +# https://en.m.wikipedia.org/wiki/Square_root_of_a_2_by_2_matrix +function Base.sqrt(S::SymmetricTensor{2,2}) + s = √(det(S)) + t = √(tr(S)+2s) + return SymmetricTensor{2,2}((S[1,1]+s, S[2,1], S[2,2]+s)) / t +end + +function Base.sqrt(S::SymmetricTensor{2,3,T}) where T + E = eigen(S) + λ = E.values + Φ = E.vectors + z = zero(T) + Λ = Tensor{2,3}((√(λ[1]), z, z, z, √(λ[2]), z, z, z, √(λ[3]))) + return unsafe_symmetric(Φ⋅Λ⋅Φ') +end + +""" + tr(::SecondOrderTensor) + +Computes the trace of a second order tensor. + +# Examples +```jldoctest +julia> A = rand(SymmetricTensor{2,3}) +3×3 SymmetricTensor{2, 3, Float64, 6}: + 0.590845 0.766797 0.566237 + 0.766797 0.460085 0.794026 + 0.566237 0.794026 0.854147 + +julia> tr(A) +1.9050765715072775 +``` +""" +@generated function LinearAlgebra.tr(S::SecondOrderTensor{dim}) where {dim} + idx(i,j) = compute_index(get_base(S), i, j) + ex = Expr[:(get_data(S)[$(idx(i,i))]) for i in 1:dim] + exp = reduce((ex1, ex2) -> :(+($ex1, $ex2)), ex) + @inbounds return exp +end + +Statistics.mean(S::SecondOrderTensor) = tr(S) / 3 + +""" + vol(::SecondOrderTensor) + +Computes the volumetric part of a second order tensor +based on the additive decomposition. 
+ +# Examples +```jldoctest +julia> A = rand(SymmetricTensor{2,3}) +3×3 SymmetricTensor{2, 3, Float64, 6}: + 0.590845 0.766797 0.566237 + 0.766797 0.460085 0.794026 + 0.566237 0.794026 0.854147 + +julia> vol(A) +3×3 SymmetricTensor{2, 3, Float64, 6}: + 0.635026 0.0 0.0 + 0.0 0.635026 0.0 + 0.0 0.0 0.635026 + +julia> vol(A) + dev(A) ≈ A +true +``` +""" +vol(S::SecondOrderTensor) = mean(S) * one(S) + +""" + dev(::SecondOrderTensor) + +Computes the deviatoric part of a second order tensor. + +# Examples +```jldoctest +julia> A = rand(Tensor{2, 3}); + +julia> dev(A) +3×3 Tensor{2, 3, Float64, 9}: + 0.0469421 0.460085 0.200586 + 0.766797 0.250123 0.298614 + 0.566237 0.854147 -0.297065 + +julia> tr(dev(A)) +0.0 +``` +""" +@inline function dev(S::SecondOrderTensor) + Tt = get_base(typeof(S)) + trace = tr(S) / 3 + Tt( + @inline function(i, j) + @inbounds v = i == j ? S[i,j] - trace : S[i,j] + v + end + ) +end + +# https://web.archive.org/web/20150311224314/http://inside.mines.edu/fs_home/gmurray/ArbitraryAxisRotation/ +""" + rotate(x::AbstractTensor{3}, u::Vec{3}, θ::Number) + +Rotate a three dimensional tensor `x` around the vector `u` a total of `θ` radians. + +# Examples +```jldoctest +julia> x = Vec{3}((0.0, 0.0, 1.0)); + +julia> u = Vec{3}((0.0, 1.0, 0.0)); + +julia> rotate(x, u, π/2) +3-element Vec{3, Float64}: + 1.0 + 0.0 + 6.123233995736766e-17 +``` +""" +rotate(x::AbstractTensor{<:Any,3}, u::Vec{3}, θ::Number) + +function rotate(x::Vec{3}, u::Vec{3}, θ::Number) + ux = u ⋅ x + u² = u ⋅ u + s, c = sincos(θ) + (u * ux * (1 - c) + u² * x * c + sqrt(u²) * (u × x) * s) / u² +end + +""" + rotate(x::AbstractTensor{2}, θ::Number) + +Rotate a two dimensional tensor `x` `θ` radians around the out-of-plane axis. 
+ +# Examples +```jldoctest +julia> x = Vec{2}((0.0, 1.0)); + +julia> rotate(x, π/4) +2-element Vec{2, Float64}: + -0.7071067811865475 + 0.7071067811865476 +``` +""" +rotate(x::AbstractTensor{<:Any, 2}, θ::Number) + +function rotate(x::Vec{2}, θ::Number) + s, c = sincos(θ) + return Vec{2}((c * x[1] - s * x[2], s * x[1] + c * x[2])) +end + +@deprecate rotation_matrix rotation_tensor false + +""" + rotation_tensor(θ::Number) + +Return the two-dimensional rotation matrix corresponding to rotation of `θ` radians around +the out-of-plane axis (i.e. around `(0, 0, 1)`). +""" +function rotation_tensor(θ::Number) + s, c = sincos(θ) + return Tensor{2, 2}((c, s, -s, c)) +end + +""" + rotation_tensor(ψ::Number, θ::Number, ϕ::Number) + +Return the three-dimensional rotation matrix corresponding to the rotation described +by the three Euler angles ``ψ``, ``θ``, ``ϕ``. + +```math +R(ψ,θ,ϕ) = R_x(ψ)R_y(θ)R_z(ϕ) +``` +see e.g. for a complete description. + +Note that the [gimbal lock phenomena](https://en.wikipedia.org/wiki/Gimbal_lock) can occur when using +this rotation tensor parametrization. +""" +function rotation_tensor(ψ::Number,θ::Number,ϕ::Number) + sψ, cψ = sincos(ψ) + sθ, cθ = sincos(θ) + sϕ, cϕ = sincos(ϕ) + return Tensor{2,3}((cθ * cϕ, cθ * sϕ, -sθ, #first column + sψ * sθ * cϕ - cψ * sϕ, sψ * sθ * sϕ + cψ * cϕ, sψ * cθ, #second column + cψ * sθ * cϕ + sψ * sϕ, cψ * sθ * sϕ - sψ * cϕ, cψ * cθ)) #third column +end + +""" + rotation_tensor(u::Vec{3}, θ::Number) + +Return the three-dimensional rotation matrix corresponding to rotation of `θ` radians around +the vector `u`. 
+""" +function rotation_tensor(u::Vec{3, T}, θ::Number) where T + # See http://mathworld.wolfram.com/RodriguesRotationFormula.html + u = u / norm(u) + z = zero(T) + ω = Tensor{2, 3}((z, u[3], -u[2], -u[3], z, u[1], u[2], -u[1], z)) + s, c = sincos(θ) + return one(ω) + s * ω + (1 - c) * ω^2 +end + +# args is (u::Vec{3}, θ::Number) for 3D tensors, and (θ::number,) for 2D +function rotate(x::SymmetricTensor{2}, args...) + R = rotation_tensor(args...) + return unsafe_symmetric(R ⋅ x ⋅ R') +end +function rotate(x::Tensor{2}, args...) + R = rotation_tensor(args...) + return R ⋅ x ⋅ R' +end +function rotate(x::Tensor{3}, args...) + R = rotation_tensor(args...) + return otimesu(R, R) ⊡ x ⋅ R' +end +function rotate(x::Tensor{4}, args...) + R = rotation_tensor(args...) + return otimesu(R, R) ⊡ x ⊡ otimesu(R', R') +end +function rotate(x::SymmetricTensor{4}, args...) + R = rotation_tensor(args...) + return unsafe_symmetric(otimesu(R, R) ⊡ x ⊡ otimesu(R', R')) +end diff --git a/src/precompile.jl b/src/precompile.jl index 3a1de888..1ff2a601 100644 --- a/src/precompile.jl +++ b/src/precompile.jl @@ -1,34 +1,34 @@ -using PrecompileTools - -@compile_workload begin - for dim in (2, 3) - v = ones(Tensor{1, dim, Float64,}) - σ = one(SymmetricTensor{2, dim, Float64}) - F = one(Tensor{2, dim, Float64,}) - E = one(SymmetricTensor{4, dim, Float64}) - C = one(Tensor{4, dim, Float64,}) - v * 1.0 - v ⋅ v - v ⊗ v - σ ⋅ v - F ⋅ v - σ ⊗ σ - F ⊗ F - σ * 1.0 - F * 1.0 - σ ⊡ σ - F ⊡ F - E ⊡ σ - C ⊡ F - E * 1.0 - C * 1.0 - - # TODO: AD? 
- end - - # See discussion in https://github.com/Ferrite-FEM/Tensors.jl/pull/190 - if @isdefined var"#102#103" - precompile(Tuple{typeof(apply_all), Type{Tensor{1, 2, T, M} where M where T}, var"#102#103"{Float64}}) - precompile(Tuple{typeof(apply_all), Type{Tensor{1, 3, T, M} where M where T}, var"#102#103"{Float64}}) - end -end +using PrecompileTools + +@compile_workload begin + for dim in (2, 3) + v = ones(Tensor{1, dim, Float64,}) + σ = one(SymmetricTensor{2, dim, Float64}) + F = one(Tensor{2, dim, Float64,}) + E = one(SymmetricTensor{4, dim, Float64}) + C = one(Tensor{4, dim, Float64,}) + v * 1.0 + v ⋅ v + v ⊗ v + σ ⋅ v + F ⋅ v + σ ⊗ σ + F ⊗ F + σ * 1.0 + F * 1.0 + σ ⊡ σ + F ⊡ F + E ⊡ σ + C ⊡ F + E * 1.0 + C * 1.0 + + # TODO: AD? + end + + # See discussion in https://github.com/Ferrite-FEM/Tensors.jl/pull/190 + if @isdefined var"#102#103" + precompile(Tuple{typeof(apply_all), Type{Tensor{1, 2, T, M} where M where T}, var"#102#103"{Float64}}) + precompile(Tuple{typeof(apply_all), Type{Tensor{1, 3, T, M} where M where T}, var"#102#103"{Float64}}) + end +end diff --git a/src/promotion_conversion.jl b/src/promotion_conversion.jl index 61257051..9f04de6d 100644 --- a/src/promotion_conversion.jl +++ b/src/promotion_conversion.jl @@ -1,88 +1,88 @@ -############# -# Promotion # -############# - -# Promotion between two tensors promote the eltype and promotes -# symmetric tensors to tensors - -@inline function Base.promote_rule(::Type{SymmetricTensor{order, dim, A, M}}, - ::Type{SymmetricTensor{order, dim, B, M}}) where {dim, A, B, order, M} - SymmetricTensor{order, dim, promote_type(A, B), M} -end - -@inline function Base.promote_rule(::Type{Tensor{order, dim, A, M}}, - ::Type{Tensor{order, dim, B, M}}) where {dim, A, B, order, M} - Tensor{order, dim, promote_type(A, B), M} -end - -@inline function Base.promote_rule(::Type{SymmetricTensor{order, dim, A, M1}}, - ::Type{Tensor{order, dim, B, M2}}) where {dim, A, B, order, M1, M2} - Tensor{order, dim, promote_type(A, B), 
M2} -end - -@inline function Base.promote_rule(::Type{Tensor{order, dim, A, M1}}, - ::Type{SymmetricTensor{order, dim, B, M2}}) where {dim, A, B, order, M1, M2} - Tensor{order, dim, promote_type(A, B), M1} -end - -# inlined promote (promote in Base is not inlined) -@inline function Base.promote(S1::T, S2::S) where {T <: AbstractTensor, S <: AbstractTensor} - return convert(promote_type(T, S), S1), convert(promote_type(T, S), S2) -end -@inline Base.promote(S1::AbstractTensor{order, dim, T}) where {order, dim, T} = convert(Tensor{order, dim, T}, S1) - -# base promotion that only promotes SymmetricTensor to Tensor but leaves eltype -@inline function promote_base(S1::Tensor{order, dim}, S2::SymmetricTensor{order, dim}) where {order, dim} - return S1, convert(Tensor{order, dim}, S2) -end -@inline function promote_base(S1::SymmetricTensor{order, dim}, S2::Tensor{order, dim}) where {order, dim} - return convert(Tensor{order, dim}, S1), S2 -end - -############### -# Conversions # -############### - -# Identity conversions -@inline Base.convert(::Type{Tensor{order, dim, T}}, t::Tensor{order, dim, T}) where {order, dim, T} = t -@inline Base.convert(::Type{Tensor{order, dim, T, M}}, t::Tensor{order, dim, T, M}) where {order, dim, T, M} = t -@inline Base.convert(::Type{SymmetricTensor{order, dim, T}}, t::SymmetricTensor{order, dim, T}) where {order, dim, T} = t -@inline Base.convert(::Type{SymmetricTensor{order, dim, T, M}}, t::SymmetricTensor{order, dim, T, M}) where {order, dim, T, M} = t - -# Change element type -@inline function Base.convert(::Type{Tensor{order, dim, T1}}, t::Tensor{order, dim, T2}) where {order, dim, T1, T2} - apply_all(Tensor{order, dim}, @inline function(i) @inbounds T1(t.data[i]); end) -end - -@inline function Base.convert(::Type{SymmetricTensor{order, dim, T1}}, t::SymmetricTensor{order, dim, T2}) where {order, dim, T1, T2} - apply_all(SymmetricTensor{order, dim}, @inline function(i) @inbounds T1(t.data[i]); end) -end - -# Peel off the M but define 
these so that convert(typeof(...), ...) works -@inline Base.convert(::Type{Tensor{order, dim, T1, M}}, t::Tensor{order, dim}) where {order, dim, T1, M} = convert(Tensor{order, dim, T1}, t) -@inline Base.convert(::Type{SymmetricTensor{order, dim, T1, M}}, t::SymmetricTensor{order, dim}) where {order, dim, T1, M} = convert(SymmetricTensor{order, dim, T1}, t) -@inline Base.convert(::Type{Tensor{order, dim, T1, M}}, t::SymmetricTensor{order, dim}) where {order, dim, T1, M} = convert(Tensor{order, dim, T1}, t) -@inline Base.convert(::Type{SymmetricTensor{order, dim, T1, M}}, t::Tensor{order, dim}) where {order, dim, T1, M} = convert(SymmetricTensor{order, dim, T1}, t) - -@inline Base.convert(::Type{Tensor{order, dim}}, t::SymmetricTensor{order, dim, T}) where {order, dim, T} = convert(Tensor{order, dim, T}, t) -@inline Base.convert(::Type{SymmetricTensor{order, dim}}, t::Tensor{order, dim, T}) where {order, dim, T} = convert(SymmetricTensor{order, dim, T}, t) -@inline Base.convert(::Type{Tensor}, t::SymmetricTensor{order, dim, T}) where {order, dim, T} = convert(Tensor{order, dim, T}, t) -@inline Base.convert(::Type{SymmetricTensor}, t::Tensor{order, dim, T}) where {order, dim, T} = convert(SymmetricTensor{order, dim, T}, t) - -# SymmetricTensor -> Tensor -@inline function Base.convert(::Type{Tensor{2, dim, T1}}, t::SymmetricTensor{2, dim, T2}) where {dim, T1, T2} - Tensor{2, dim}(@inline function(i,j) @inbounds T1(t[i,j]); end) -end - -@inline function Base.convert(::Type{Tensor{4, dim, T1}}, t::SymmetricTensor{4, dim, T2}) where {dim, T1, T2} - Tensor{4, dim}(@inline function(i,j,k,l) @inbounds T1(t[i,j,k,l]); end) -end - -# Tensor -> SymmetricTensor -@inline function Base.convert(::Type{SymmetricTensor{order, dim, T1}}, t::Tensor{order, dim}) where {dim, order, T1} - if issymmetric(t) - return convert(SymmetricTensor{order, dim, T1}, symmetric(t)) - else - throw(InexactError(:convert, SymmetricTensor{order, dim, T1}, t)) - end -end +############# +# Promotion # 
+############# + +# Promotion between two tensors promote the eltype and promotes +# symmetric tensors to tensors + +@inline function Base.promote_rule(::Type{SymmetricTensor{order, dim, A, M}}, + ::Type{SymmetricTensor{order, dim, B, M}}) where {dim, A, B, order, M} + SymmetricTensor{order, dim, promote_type(A, B), M} +end + +@inline function Base.promote_rule(::Type{Tensor{order, dim, A, M}}, + ::Type{Tensor{order, dim, B, M}}) where {dim, A, B, order, M} + Tensor{order, dim, promote_type(A, B), M} +end + +@inline function Base.promote_rule(::Type{SymmetricTensor{order, dim, A, M1}}, + ::Type{Tensor{order, dim, B, M2}}) where {dim, A, B, order, M1, M2} + Tensor{order, dim, promote_type(A, B), M2} +end + +@inline function Base.promote_rule(::Type{Tensor{order, dim, A, M1}}, + ::Type{SymmetricTensor{order, dim, B, M2}}) where {dim, A, B, order, M1, M2} + Tensor{order, dim, promote_type(A, B), M1} +end + +# inlined promote (promote in Base is not inlined) +@inline function Base.promote(S1::T, S2::S) where {T <: AbstractTensor, S <: AbstractTensor} + return convert(promote_type(T, S), S1), convert(promote_type(T, S), S2) +end +@inline Base.promote(S1::AbstractTensor{order, dim, T}) where {order, dim, T} = convert(Tensor{order, dim, T}, S1) + +# base promotion that only promotes SymmetricTensor to Tensor but leaves eltype +@inline function promote_base(S1::Tensor{order, dim}, S2::SymmetricTensor{order, dim}) where {order, dim} + return S1, convert(Tensor{order, dim}, S2) +end +@inline function promote_base(S1::SymmetricTensor{order, dim}, S2::Tensor{order, dim}) where {order, dim} + return convert(Tensor{order, dim}, S1), S2 +end + +############### +# Conversions # +############### + +# Identity conversions +@inline Base.convert(::Type{Tensor{order, dim, T}}, t::Tensor{order, dim, T}) where {order, dim, T} = t +@inline Base.convert(::Type{Tensor{order, dim, T, M}}, t::Tensor{order, dim, T, M}) where {order, dim, T, M} = t +@inline 
Base.convert(::Type{SymmetricTensor{order, dim, T}}, t::SymmetricTensor{order, dim, T}) where {order, dim, T} = t +@inline Base.convert(::Type{SymmetricTensor{order, dim, T, M}}, t::SymmetricTensor{order, dim, T, M}) where {order, dim, T, M} = t + +# Change element type +@inline function Base.convert(::Type{Tensor{order, dim, T1}}, t::Tensor{order, dim, T2}) where {order, dim, T1, T2} + apply_all(Tensor{order, dim}, @inline function(i) @inbounds T1(t.data[i]); end) +end + +@inline function Base.convert(::Type{SymmetricTensor{order, dim, T1}}, t::SymmetricTensor{order, dim, T2}) where {order, dim, T1, T2} + apply_all(SymmetricTensor{order, dim}, @inline function(i) @inbounds T1(t.data[i]); end) +end + +# Peel off the M but define these so that convert(typeof(...), ...) works +@inline Base.convert(::Type{Tensor{order, dim, T1, M}}, t::Tensor{order, dim}) where {order, dim, T1, M} = convert(Tensor{order, dim, T1}, t) +@inline Base.convert(::Type{SymmetricTensor{order, dim, T1, M}}, t::SymmetricTensor{order, dim}) where {order, dim, T1, M} = convert(SymmetricTensor{order, dim, T1}, t) +@inline Base.convert(::Type{Tensor{order, dim, T1, M}}, t::SymmetricTensor{order, dim}) where {order, dim, T1, M} = convert(Tensor{order, dim, T1}, t) +@inline Base.convert(::Type{SymmetricTensor{order, dim, T1, M}}, t::Tensor{order, dim}) where {order, dim, T1, M} = convert(SymmetricTensor{order, dim, T1}, t) + +@inline Base.convert(::Type{Tensor{order, dim}}, t::SymmetricTensor{order, dim, T}) where {order, dim, T} = convert(Tensor{order, dim, T}, t) +@inline Base.convert(::Type{SymmetricTensor{order, dim}}, t::Tensor{order, dim, T}) where {order, dim, T} = convert(SymmetricTensor{order, dim, T}, t) +@inline Base.convert(::Type{Tensor}, t::SymmetricTensor{order, dim, T}) where {order, dim, T} = convert(Tensor{order, dim, T}, t) +@inline Base.convert(::Type{SymmetricTensor}, t::Tensor{order, dim, T}) where {order, dim, T} = convert(SymmetricTensor{order, dim, T}, t) + +# SymmetricTensor 
-> Tensor +@inline function Base.convert(::Type{Tensor{2, dim, T1}}, t::SymmetricTensor{2, dim, T2}) where {dim, T1, T2} + Tensor{2, dim}(@inline function(i,j) @inbounds T1(t[i,j]); end) +end + +@inline function Base.convert(::Type{Tensor{4, dim, T1}}, t::SymmetricTensor{4, dim, T2}) where {dim, T1, T2} + Tensor{4, dim}(@inline function(i,j,k,l) @inbounds T1(t[i,j,k,l]); end) +end + +# Tensor -> SymmetricTensor +@inline function Base.convert(::Type{SymmetricTensor{order, dim, T1}}, t::Tensor{order, dim}) where {dim, order, T1} + if issymmetric(t) + return convert(SymmetricTensor{order, dim, T1}, symmetric(t)) + else + throw(InexactError(:convert, SymmetricTensor{order, dim, T1}, t)) + end +end diff --git a/src/simd.jl b/src/simd.jl index 7f05eaee..06843e13 100644 --- a/src/simd.jl +++ b/src/simd.jl @@ -1,445 +1,445 @@ -#= -This file contains explicit SIMD instructions for tensors. -Many of the methods defined outside this file will use SIMD-instructions -if julia is ran with -O3. Even if -O3 is enabled, the compiler is sometimes -thrown off guard, and therefore, explicit SIMD routines are -defined. This will enable SIMD-instructions even if julia is ran with -the default -O2. - -The functions here are only defined for tensors of the same -element type. Otherwise it does work. Promotion should take -care of this before the tensors enter the functions here. 
- -The file is organized as follows: -(1): + and - between tensors -(2): * and / between tensor and number -(3): dot -(4): dcontract -(5): otimes -(6): norm -=# - -import SIMD -const SVec{N, T} = SIMD.Vec{N, T} - -const SIMDTypes = Union{Float16, Float32, Float64} - -const AllSIMDTensors{T <: SIMDTypes} = Union{Tensor{1, 1, T, 1}, Tensor{1, 2, T, 2}, Tensor{1, 3, T, 3}, - Tensor{2, 1, T, 1}, Tensor{2, 2, T, 4}, Tensor{2, 3, T, 9}, - Tensor{4, 1, T, 1}, Tensor{4, 2, T, 16}, #=Tensor{4, 3, T, 81},=# - SymmetricTensor{2, 1, T, 1}, SymmetricTensor{2, 2, T, 3}, SymmetricTensor{2, 3, T, 6}, - SymmetricTensor{4, 1, T, 1}, SymmetricTensor{4, 2, T, 9}, SymmetricTensor{4, 3, T, 36}} - -# factors for the symmetric tensors, return a quote -function symmetric_factors(order, dim, T) - if order == 2 - dim == 1 && return :(SVec{1, $T}(($T(1),))) - dim == 2 && return :(SVec{3, $T}(($T(1),$T(2),$T(1)))) - dim == 3 && return :(SVec{6, $T}(($T(1),$T(2),$T(2),$T(1),$T(2),$T(1)))) - elseif order == 4 - dim == 1 && return :(SVec{1, $T}(($T(1),))) - dim == 2 && return :(SVec{9, $T}(($T(1),$T(2),$T(1),$T(2),$T(4),$T(2),$T(1),$T(2),$T(1)))) - dim == 3 && return :(SVec{36,$T}(($T(1),$T(2),$T(2),$T(1),$T(2),$T(1),$T(2),$T(4),$T(4),$T(2),$T(4),$T(2), - $T(2),$T(4),$T(4),$T(2),$T(4),$T(2),$T(1),$T(2),$T(2),$T(1),$T(2),$T(1), - $T(2),$T(4),$T(4),$T(2),$T(4),$T(2),$T(1),$T(2),$T(2),$T(1),$T(2),$T(1)))) - end -end - -# Tensor from SVec (Note: This needs to be two separate methods in order to dispatch correctly) -@generated function (::Type{Tensor{order, dim}})(r::SVec{N, T}) where {order, dim, N, T} - return quote - $(Expr(:meta, :inline)) - Tensor{$order, $dim}($(Expr(:tuple, [:(r[$i]) for i in 1:N]...))) - end -end -@generated function (::Type{SymmetricTensor{order, dim}})(r::SVec{N, T}) where {order, dim, N, T} - return quote - $(Expr(:meta, :inline)) - SymmetricTensor{$order, $dim}($(Expr(:tuple, [:(r[$i]) for i in 1:N]...))) - end -end -# constructor from several SVecs which happens in dot, 
dcontract and otimes -@generated function (::Type{Tensor{order, dim}})(r::NTuple{M, SVec{N, T}}) where {order, dim, M, N, T} - return quote - $(Expr(:meta, :inline)) - @inbounds return Tensor{$order, $dim}($(Expr(:tuple, [:(r[$j][$i]) for i in 1:N, j in 1:M]...))) - end -end -@generated function (::Type{SymmetricTensor{order, dim}})(r::NTuple{M, SVec{N, T}}) where {order, dim, M, N, T} - return quote - $(Expr(:meta, :inline)) - @inbounds return SymmetricTensor{$order, $dim}($(Expr(:tuple, [:(r[$j][$i]) for i in 1:N, j in 1:M]...))) - end -end - -# load Tensor into SVec -@inline tosimd(A::NTuple{N, T}) where {N, T} = SVec{N, T}(A) - -# load given range of linear indices into SVec -@generated function tosimd(D::NTuple{N, T}, ::Type{Val{strt}}, ::Type{Val{stp}}) where {N, T, strt, stp} - expr = Expr(:tuple, [:(D[$i]) for i in strt:stp]...) - M = length(expr.args) - return quote - $(Expr(:meta, :inline)) - @inbounds return SVec{$M, T}($expr) - end -end - -################################ -# (1): + and - between tensors # -################################ -@inline function Base.:+(S1::TT, S2::TT) where {TT <: AllSIMDTensors} - @inbounds begin - D1 = get_data(S1); SV1 = tosimd(D1) - D2 = get_data(S2); SV2 = tosimd(D2) - r = SV1 + SV2 - return get_base(TT)(r) - end -end -@inline function Base.:-(S1::TT, S2::TT) where {TT <: AllSIMDTensors} - @inbounds begin - D1 = get_data(S1); SV1 = tosimd(D1) - D2 = get_data(S2); SV2 = tosimd(D2) - r = SV1 - SV2 - return get_base(TT)(r) - end -end - -########################################## -# (2): * and / between tensor and number # -########################################## -@inline function Base.:*(n::T, S::AllSIMDTensors{T}) where {T <: SIMDTypes} - @inbounds begin - D = get_data(S); SV = tosimd(D) - r = n * SV - return get_base(typeof(S))(r) - end -end -@inline function Base.:*(S::AllSIMDTensors{T}, n::T) where {T <: SIMDTypes} - @inbounds begin - D = get_data(S); SV = tosimd(D) - r = SV * n - return get_base(typeof(S))(r) - end 
-end -@inline function Base.:/(S::AllSIMDTensors{T}, n::T) where {T <: SIMDTypes} - @inbounds begin - D = get_data(S); SV = tosimd(D) - r = SV / n - return get_base(typeof(S))(r) - end -end - -############ -# (3): dot # -############ -# 2s-1 -@inline LinearAlgebra.dot(S1::SymmetricTensor{2, dim, T}, S2::Vec{dim, T}) where {dim, T <: SIMDTypes} = dot(promote(S1), S2) -# 2-1 -@inline function LinearAlgebra.dot(S1::Tensor{2, 2, T}, S2::Vec{2, T}) where {T <: SIMDTypes} - @inbounds begin - D1 = get_data(S1); D2 = get_data(S2) - SV11 = tosimd(D1, Val{1}, Val{2}) - SV12 = tosimd(D1, Val{3}, Val{4}) - r = muladd(SV12, D2[2], SV11 * D2[1]) - return Tensor{1, 2}(r) - end -end -@inline function LinearAlgebra.dot(S1::Tensor{2, 3, T}, S2::Vec{3, T}) where {T <: SIMDTypes} - @inbounds begin - D1 = get_data(S1); D2 = get_data(S2) - SV11 = tosimd(D1, Val{1}, Val{3}) - SV12 = tosimd(D1, Val{4}, Val{6}) - SV13 = tosimd(D1, Val{7}, Val{9}) - r = muladd(SV13, D2[3], muladd(SV12, D2[2], SV11 * D2[1])) - return Tensor{1, 3}(r) - end -end - -# 2s-2 / 2-2s -@inline function LinearAlgebra.dot(S1::AbstractTensor{2, dim, T}, S2::AbstractTensor{2, dim, T}) where {dim, T <: SIMDTypes} - SS1, SS2 = promote_base(S1, S2); dot(SS1, SS2) -end -# 2-2 -@inline function LinearAlgebra.dot(S1::Tensor{2, 2, T}, S2::Tensor{2, 2, T}) where {T <: SIMDTypes} - @inbounds begin - D1 = get_data(S1); D2 = get_data(S2) - SV11 = tosimd(D1, Val{1}, Val{2}) - SV12 = tosimd(D1, Val{3}, Val{4}) - r1 = muladd(SV12, D2[2], SV11 * D2[1]) - r2 = muladd(SV12, D2[4], SV11 * D2[3]) - return Tensor{2, 2}((r1, r2)) - end -end -@inline function LinearAlgebra.dot(S1::Tensor{2, 3, T}, S2::Tensor{2, 3, T}) where {T <: SIMDTypes} - @inbounds begin - D1 = get_data(S1); D2 = get_data(S2) - SV11 = tosimd(D1, Val{1}, Val{3}) - SV12 = tosimd(D1, Val{4}, Val{6}) - SV13 = tosimd(D1, Val{7}, Val{9}) - r1 = muladd(SV13, D2[3], muladd(SV12, D2[2], SV11 * D2[1])) - r2 = muladd(SV13, D2[6], muladd(SV12, D2[5], SV11 * D2[4])) - r3 = 
muladd(SV13, D2[9], muladd(SV12, D2[8], SV11 * D2[7])) - return Tensor{2, 3}((r1, r2, r3)) - end -end - -################## -# (4): dcontract # -################## -# 2s-2 / 2-2s -@inline function dcontract(S1::AbstractTensor{2, dim, T}, S2::AbstractTensor{2, dim, T}) where {dim, T <: SIMDTypes} - SS1, SS2 = promote_base(S1, S2); dcontract(SS1, SS2) -end -# 2-2 -@inline function dcontract(S1::Tensor{2, dim, T}, S2::Tensor{2, dim, T}) where {dim, T <: SIMDTypes} - SV1 = tosimd(get_data(S1)) - SV2 = tosimd(get_data(S2)) - r = SV1 * SV2 - return sum(r) -end -# 2s-2s -@generated function dcontract(S1::SymmetricTensor{2, dim, T}, S2::SymmetricTensor{2, dim, T}) where {dim, T <: SIMDTypes} - F = symmetric_factors(2, dim, T) - return quote - $(Expr(:meta, :inline)) - F = $F - SV1 = tosimd(get_data(S1)) - SV2 = tosimd(get_data(S2)) - SV1SV2 = SV1 * SV2; FSV1SV2 = F * SV1SV2 - return sum(FSV1SV2) - end -end - -# 4-2s -@inline dcontract(S1::Tensor{4, 3, T}, S2::SymmetricTensor{2, 3, T}) where {T <: SIMDTypes} = dcontract(S1, promote(S2)) -# 4-2 -@inline function dcontract(S1::Tensor{4, 2, T}, S2::Tensor{2, 2, T}) where {T <: SIMDTypes} - @inbounds begin - D1 = get_data(S1); D2 = get_data(S2) - SV11 = tosimd(D1, Val{1}, Val{4}) - SV12 = tosimd(D1, Val{5}, Val{8}) - SV13 = tosimd(D1, Val{9}, Val{12}) - SV14 = tosimd(D1, Val{13}, Val{16}) - r = muladd(SV14, D2[4], muladd(SV13, D2[3], muladd(SV12, D2[2], SV11 * D2[1]))) - return Tensor{2, 2}(r) - end -end -@inline function dcontract(S1::Tensor{4, 3, T}, S2::Tensor{2, 3, T}) where {T <: SIMDTypes} - @inbounds begin - D1 = get_data(S1); D2 = get_data(S2) - SV11 = tosimd(D1, Val{1}, Val{9}) - SV12 = tosimd(D1, Val{10}, Val{18}) - SV13 = tosimd(D1, Val{19}, Val{27}) - SV14 = tosimd(D1, Val{28}, Val{36}) - SV15 = tosimd(D1, Val{37}, Val{45}) - SV16 = tosimd(D1, Val{46}, Val{54}) - SV17 = tosimd(D1, Val{55}, Val{63}) - SV18 = tosimd(D1, Val{64}, Val{72}) - SV19 = tosimd(D1, Val{73}, Val{81}) - r = muladd(SV19, D2[9], muladd(SV18, 
D2[8], muladd(SV17, D2[7], muladd(SV16, D2[6], muladd(SV15, D2[5], muladd(SV14, D2[4], muladd(SV13, D2[3], muladd(SV12, D2[2], SV11 * D2[1])))))))) - return Tensor{2, 3}(r) - end -end -@inline function dcontract(S1::SymmetricTensor{4, 3, T}, S2::SymmetricTensor{2, 3, T}) where {T <: SIMDTypes} - @inbounds begin - D1 = get_data(S1); D2 = get_data(S2) - SV11 = tosimd(D1, Val{1}, Val{6}) - SV12 = tosimd(D1, Val{7}, Val{12}) - SV13 = tosimd(D1, Val{13}, Val{18}) - SV14 = tosimd(D1, Val{19}, Val{24}) - SV15 = tosimd(D1, Val{25}, Val{30}) - SV16 = tosimd(D1, Val{31}, Val{36}) - D21 = D2[1]; D22 = D2[2] * T(2); D23 = D2[3] * T(2) - D24 = D2[4]; D25 = D2[5] * T(2); D26 = D2[6] - r = muladd(SV16, D26, muladd(SV15, D25, muladd(SV14, D24, muladd(SV13, D23, muladd(SV12, D22, SV11 * D21))))) - return SymmetricTensor{2, 3}(r) - end -end - -# 4s-4 / 4-4s -@inline function dcontract(S1::AbstractTensor{4, dim, T}, S2::AbstractTensor{4, dim, T}) where {dim, T <: SIMDTypes} - SS1, SS2 = promote_base(S1, S2); dcontract(SS1, SS2) -end -# 4-4 -@inline function dcontract(S1::Tensor{4, 2, T}, S2::Tensor{4, 2, T}) where {T <: SIMDTypes} - @inbounds begin - D1 = get_data(S1); D2 = get_data(S2) - SV11 = tosimd(D1, Val{1}, Val{4}) - SV12 = tosimd(D1, Val{5}, Val{8}) - SV13 = tosimd(D1, Val{9}, Val{12}) - SV14 = tosimd(D1, Val{13}, Val{16}) - r1 = muladd(SV14, D2[4], muladd(SV13, D2[3], muladd(SV12, D2[2], SV11 * D2[1]))) - r2 = muladd(SV14, D2[8], muladd(SV13, D2[7], muladd(SV12, D2[6], SV11 * D2[5]))) - r3 = muladd(SV14, D2[12], muladd(SV13, D2[11], muladd(SV12, D2[10], SV11 * D2[9]))) - r4 = muladd(SV14, D2[16], muladd(SV13, D2[15], muladd(SV12, D2[14], SV11 * D2[13]))) - return Tensor{4, 2}((r1, r2, r3, r4)) - end -end -function dcontract(S1::Tensor{4, 3, T}, S2::Tensor{4, 3, T}) where {T <: SIMDTypes} - @inbounds begin - D1 = get_data(S1); D2 = get_data(S2) - SV11 = tosimd(D1, Val{1}, Val{9}) - SV12 = tosimd(D1, Val{10}, Val{18}) - SV13 = tosimd(D1, Val{19}, Val{27}) - SV14 = tosimd(D1, 
Val{28}, Val{36}) - SV15 = tosimd(D1, Val{37}, Val{45}) - SV16 = tosimd(D1, Val{46}, Val{54}) - SV17 = tosimd(D1, Val{55}, Val{63}) - SV18 = tosimd(D1, Val{64}, Val{72}) - SV19 = tosimd(D1, Val{73}, Val{81}) - r1 = muladd(SV19, D2[9], muladd(SV18, D2[8], muladd(SV17, D2[7], muladd(SV16, D2[6], muladd(SV15, D2[5], muladd(SV14, D2[4], muladd(SV13, D2[3], muladd(SV12, D2[2], SV11 * D2[1] )))))))) - r2 = muladd(SV19, D2[18], muladd(SV18, D2[17], muladd(SV17, D2[16], muladd(SV16, D2[15], muladd(SV15, D2[14], muladd(SV14, D2[13], muladd(SV13, D2[12], muladd(SV12, D2[11], SV11 * D2[10])))))))) - r3 = muladd(SV19, D2[27], muladd(SV18, D2[26], muladd(SV17, D2[25], muladd(SV16, D2[24], muladd(SV15, D2[23], muladd(SV14, D2[22], muladd(SV13, D2[21], muladd(SV12, D2[20], SV11 * D2[19])))))))) - r4 = muladd(SV19, D2[36], muladd(SV18, D2[35], muladd(SV17, D2[34], muladd(SV16, D2[33], muladd(SV15, D2[32], muladd(SV14, D2[31], muladd(SV13, D2[30], muladd(SV12, D2[29], SV11 * D2[28])))))))) - r5 = muladd(SV19, D2[45], muladd(SV18, D2[44], muladd(SV17, D2[43], muladd(SV16, D2[42], muladd(SV15, D2[41], muladd(SV14, D2[40], muladd(SV13, D2[39], muladd(SV12, D2[38], SV11 * D2[37])))))))) - r6 = muladd(SV19, D2[54], muladd(SV18, D2[53], muladd(SV17, D2[52], muladd(SV16, D2[51], muladd(SV15, D2[50], muladd(SV14, D2[49], muladd(SV13, D2[48], muladd(SV12, D2[47], SV11 * D2[46])))))))) - r7 = muladd(SV19, D2[63], muladd(SV18, D2[62], muladd(SV17, D2[61], muladd(SV16, D2[60], muladd(SV15, D2[59], muladd(SV14, D2[58], muladd(SV13, D2[57], muladd(SV12, D2[56], SV11 * D2[55])))))))) - r8 = muladd(SV19, D2[72], muladd(SV18, D2[71], muladd(SV17, D2[70], muladd(SV16, D2[69], muladd(SV15, D2[68], muladd(SV14, D2[67], muladd(SV13, D2[66], muladd(SV12, D2[65], SV11 * D2[64])))))))) - r9 = muladd(SV19, D2[81], muladd(SV18, D2[80], muladd(SV17, D2[79], muladd(SV16, D2[78], muladd(SV15, D2[77], muladd(SV14, D2[76], muladd(SV13, D2[75], muladd(SV12, D2[74], SV11 * D2[73])))))))) - return Tensor{4, 3}((r1, 
r2, r3, r4, r5, r6, r7, r8, r9)) - end -end -@inline function dcontract(S1::SymmetricTensor{4, 3, T}, S2::Tensor{2, 3, T}) where {T <: SIMDTypes} - @inbounds begin - D1 = get_data(S1); D2 = get_data(S2) - SV11 = tosimd(D1, Val{1}, Val{6}) - SV12 = tosimd(D1, Val{7}, Val{12}) - SV13 = tosimd(D1, Val{13}, Val{18}) - SV14 = tosimd(D1, Val{19}, Val{24}) - SV15 = tosimd(D1, Val{25}, Val{30}) - SV16 = tosimd(D1, Val{31}, Val{36}) - D21 = D2[1]; D22 = D2[2] + D2[4]; D23 = D2[3] + D2[7] - D24 = D2[5]; D25 = D2[6] + D2[8]; D26 = D2[9] - r = muladd(SV16, D26, muladd(SV15, D25, muladd(SV14, D24, muladd(SV13, D23, muladd(SV12, D22, SV11 * D21))))) - return SymmetricTensor{2, 3}(r) - end -end -@inline function dcontract(S1::SymmetricTensor{4, 2, T}, S2::SymmetricTensor{4, 2, T}) where {T <: SIMDTypes} - @inbounds begin - D1 = get_data(S1); D2 = get_data(S2) - SV11 = tosimd(D1, Val{1}, Val{3}) - SV12 = tosimd(D1, Val{4}, Val{6}) - SV13 = tosimd(D1, Val{7}, Val{9}) - D21 = D2[1]; D22 = D2[2] * T(2); D23 = D2[3] - D24 = D2[4]; D25 = D2[5] * T(2); D26 = D2[6] - D27 = D2[7]; D28 = D2[8] * T(2); D29 = D2[9] - r1 = muladd(SV13, D23, muladd(SV12, D22, SV11 * D21)) - r2 = muladd(SV13, D26, muladd(SV12, D25, SV11 * D24)) - r3 = muladd(SV13, D29, muladd(SV12, D28, SV11 * D27)) - return SymmetricTensor{4, 2}((r1, r2, r3)) - end -end -function dcontract(S1::SymmetricTensor{4, 3, T}, S2::SymmetricTensor{4, 3, T}) where {T <: SIMDTypes} - @inbounds begin - D1 = get_data(S1); D2 = get_data(S2) - SV11 = tosimd(D1, Val{1}, Val{6}) - SV12 = tosimd(D1, Val{7}, Val{12}) - SV13 = tosimd(D1, Val{13}, Val{18}) - SV14 = tosimd(D1, Val{19}, Val{24}) - SV15 = tosimd(D1, Val{25}, Val{30}) - SV16 = tosimd(D1, Val{31}, Val{36}) - D21 = D2[1]; D22 = D2[2] * T(2); D23 = D2[3] * T(2); D24 = D2[4]; D25 = D2[5] * T(2); D26 = D2[6] - D27 = D2[7]; D28 = D2[8] * T(2); D29 = D2[9] * T(2); D210 = D2[10]; D211 = D2[11] * T(2); D212 = D2[12] - D213 = D2[13]; D214 = D2[14] * T(2); D215 = D2[15] * T(2); D216 = D2[16]; 
D217 = D2[17] * T(2); D218 = D2[18] - D219 = D2[19]; D220 = D2[20] * T(2); D221 = D2[21] * T(2); D222 = D2[22]; D223 = D2[23] * T(2); D224 = D2[24] - D225 = D2[25]; D226 = D2[26] * T(2); D227 = D2[27] * T(2); D228 = D2[28]; D229 = D2[29] * T(2); D230 = D2[30] - D231 = D2[31]; D232 = D2[32] * T(2); D233 = D2[33] * T(2); D234 = D2[34]; D235 = D2[35] * T(2); D236 = D2[36] - r1 = muladd(SV16, D26, muladd(SV15, D25, muladd(SV14, D24, muladd(SV13, D23, muladd(SV12, D22, SV11 * D21 ))))) - r2 = muladd(SV16, D212, muladd(SV15, D211, muladd(SV14, D210, muladd(SV13, D29, muladd(SV12, D28, SV11 * D27 ))))) - r3 = muladd(SV16, D218, muladd(SV15, D217, muladd(SV14, D216, muladd(SV13, D215, muladd(SV12, D214, SV11 * D213))))) - r4 = muladd(SV16, D224, muladd(SV15, D223, muladd(SV14, D222, muladd(SV13, D221, muladd(SV12, D220, SV11 * D219))))) - r5 = muladd(SV16, D230, muladd(SV15, D229, muladd(SV14, D228, muladd(SV13, D227, muladd(SV12, D226, SV11 * D225))))) - r6 = muladd(SV16, D236, muladd(SV15, D235, muladd(SV14, D234, muladd(SV13, D233, muladd(SV12, D232, SV11 * D231))))) - return SymmetricTensor{4, 3}((r1, r2, r3, r4, r5, r6)) - end -end - -############### -# (5): otimes # -############### -# 1-1 -@inline function otimes(S1::Vec{2, T}, S2::Vec{2, T}) where {T <: SIMDTypes} - @inbounds begin - D1 = get_data(S1); D2 = get_data(S2) - SV1 = tosimd(D1) - r1 = SV1 * D2[1] - r2 = SV1 * D2[2] - return Tensor{2, 2}((r1, r2)) - end -end -@inline function otimes(S1::Vec{3, T}, S2::Vec{3, T}) where {T <: SIMDTypes} - @inbounds begin - D1 = get_data(S1); D2 = get_data(S2) - SV1 = tosimd(D1) - r1 = SV1 * D2[1]; r2 = SV1 * D2[2]; r3 = SV1 * D2[3] - return Tensor{2, 3}((r1, r2, r3)) - end -end - -# 2s-2 / 2-2s -@inline function otimes(S1::AbstractTensor{2, dim, T}, S2::AbstractTensor{2, dim, T}) where {dim, T <: SIMDTypes} - SS1, SS2 = promote_base(S1, S2); otimes(SS1, SS2) -end - -# 2-2 -@inline function otimes(S1::Tensor{2, 2, T}, S2::Tensor{2, 2, T}) where {T <: SIMDTypes} - @inbounds 
begin - D1 = get_data(S1); D2 = get_data(S2) - SV1 = tosimd(D1) - r1 = SV1 * D2[1]; r2 = SV1 * D2[2]; r3 = SV1 * D2[3]; r4 = SV1 * D2[4] - return Tensor{4, 2}((r1, r2, r3, r4)) - end -end -@inline function otimes(S1::Tensor{2, 3, T}, S2::Tensor{2, 3, T}) where {T <: SIMDTypes} - @inbounds begin - D1 = get_data(S1); D2 = get_data(S2) - SV1 = tosimd(D1) - r1 = SV1 * D2[1]; r2 = SV1 * D2[2]; r3 = SV1 * D2[3] - r4 = SV1 * D2[4]; r5 = SV1 * D2[5]; r6 = SV1 * D2[6] - r7 = SV1 * D2[7]; r8 = SV1 * D2[8]; r9 = SV1 * D2[9] - return Tensor{4, 3}((r1, r2, r3, r4, r5, r6, r7, r8, r9)) - end -end - -# 2s-2s -@inline function otimes(S1::SymmetricTensor{2, 2, T}, S2::SymmetricTensor{2, 2, T}) where {T <: SIMDTypes} - @inbounds begin - D1 = get_data(S1); D2 = get_data(S2) - SV1 = tosimd(D1) - r1 = SV1 * D2[1]; r2 = SV1 * D2[2]; r3 = SV1 * D2[3] - return SymmetricTensor{4, 2}((r1, r2, r3)) - end -end -@inline function otimes(S1::SymmetricTensor{2, 3, T}, S2::SymmetricTensor{2, 3, T}) where {T <: SIMDTypes} - @inbounds begin - D1 = get_data(S1); D2 = get_data(S2) - SV1 = tosimd(D1) - r1 = SV1 * D2[1]; r2 = SV1 * D2[2]; r3 = SV1 * D2[3] - r4 = SV1 * D2[4]; r5 = SV1 * D2[5]; r6 = SV1 * D2[6] - return SymmetricTensor{4, 3}((r1, r2, r3, r4, r5, r6)) - end -end - -############# -# (6): norm # -############# -# order 1 and order 2 norms rely on dot and dcontract respectively -@inline function LinearAlgebra.norm(S::Tensor{4, dim, T}) where {dim, T <: SIMDTypes} - @inbounds begin - SV = tosimd(get_data(S)) - SVSV = SV * SV - r = sum(SVSV) - return sqrt(r) - end -end -@generated function LinearAlgebra.norm(S::SymmetricTensor{4, dim, T}) where {dim, T <: SIMDTypes} - F = symmetric_factors(4, dim, T) - return quote - $(Expr(:meta, :inline)) - F = $F - SV = tosimd(get_data(S)) - SVSV = SV * SV; FSVSV = F * SVSV; r = sum(FSVSV) - return sqrt(r) - end -end +#= +This file contains explicit SIMD instructions for tensors. 
+Many of the methods defined outside this file will use SIMD-instructions +if julia is ran with -O3. Even if -O3 is enabled, the compiler is sometimes +thrown off guard, and therefore, explicit SIMD routines are +defined. This will enable SIMD-instructions even if julia is ran with +the default -O2. + +The functions here are only defined for tensors of the same +element type. Otherwise it does work. Promotion should take +care of this before the tensors enter the functions here. + +The file is organized as follows: +(1): + and - between tensors +(2): * and / between tensor and number +(3): dot +(4): dcontract +(5): otimes +(6): norm +=# + +import SIMD +const SVec{N, T} = SIMD.Vec{N, T} + +const SIMDTypes = Union{Float16, Float32, Float64} + +const AllSIMDTensors{T <: SIMDTypes} = Union{Tensor{1, 1, T, 1}, Tensor{1, 2, T, 2}, Tensor{1, 3, T, 3}, + Tensor{2, 1, T, 1}, Tensor{2, 2, T, 4}, Tensor{2, 3, T, 9}, + Tensor{4, 1, T, 1}, Tensor{4, 2, T, 16}, #=Tensor{4, 3, T, 81},=# + SymmetricTensor{2, 1, T, 1}, SymmetricTensor{2, 2, T, 3}, SymmetricTensor{2, 3, T, 6}, + SymmetricTensor{4, 1, T, 1}, SymmetricTensor{4, 2, T, 9}, SymmetricTensor{4, 3, T, 36}} + +# factors for the symmetric tensors, return a quote +function symmetric_factors(order, dim, T) + if order == 2 + dim == 1 && return :(SVec{1, $T}(($T(1),))) + dim == 2 && return :(SVec{3, $T}(($T(1),$T(2),$T(1)))) + dim == 3 && return :(SVec{6, $T}(($T(1),$T(2),$T(2),$T(1),$T(2),$T(1)))) + elseif order == 4 + dim == 1 && return :(SVec{1, $T}(($T(1),))) + dim == 2 && return :(SVec{9, $T}(($T(1),$T(2),$T(1),$T(2),$T(4),$T(2),$T(1),$T(2),$T(1)))) + dim == 3 && return :(SVec{36,$T}(($T(1),$T(2),$T(2),$T(1),$T(2),$T(1),$T(2),$T(4),$T(4),$T(2),$T(4),$T(2), + $T(2),$T(4),$T(4),$T(2),$T(4),$T(2),$T(1),$T(2),$T(2),$T(1),$T(2),$T(1), + $T(2),$T(4),$T(4),$T(2),$T(4),$T(2),$T(1),$T(2),$T(2),$T(1),$T(2),$T(1)))) + end +end + +# Tensor from SVec (Note: This needs to be two separate methods in order to dispatch correctly) +@generated 
function (::Type{Tensor{order, dim}})(r::SVec{N, T}) where {order, dim, N, T} + return quote + $(Expr(:meta, :inline)) + Tensor{$order, $dim}($(Expr(:tuple, [:(r[$i]) for i in 1:N]...))) + end +end +@generated function (::Type{SymmetricTensor{order, dim}})(r::SVec{N, T}) where {order, dim, N, T} + return quote + $(Expr(:meta, :inline)) + SymmetricTensor{$order, $dim}($(Expr(:tuple, [:(r[$i]) for i in 1:N]...))) + end +end +# constructor from several SVecs which happens in dot, dcontract and otimes +@generated function (::Type{Tensor{order, dim}})(r::NTuple{M, SVec{N, T}}) where {order, dim, M, N, T} + return quote + $(Expr(:meta, :inline)) + @inbounds return Tensor{$order, $dim}($(Expr(:tuple, [:(r[$j][$i]) for i in 1:N, j in 1:M]...))) + end +end +@generated function (::Type{SymmetricTensor{order, dim}})(r::NTuple{M, SVec{N, T}}) where {order, dim, M, N, T} + return quote + $(Expr(:meta, :inline)) + @inbounds return SymmetricTensor{$order, $dim}($(Expr(:tuple, [:(r[$j][$i]) for i in 1:N, j in 1:M]...))) + end +end + +# load Tensor into SVec +@inline tosimd(A::NTuple{N, T}) where {N, T} = SVec{N, T}(A) + +# load given range of linear indices into SVec +@generated function tosimd(D::NTuple{N, T}, ::Type{Val{strt}}, ::Type{Val{stp}}) where {N, T, strt, stp} + expr = Expr(:tuple, [:(D[$i]) for i in strt:stp]...) 
+ M = length(expr.args) + return quote + $(Expr(:meta, :inline)) + @inbounds return SVec{$M, T}($expr) + end +end + +################################ +# (1): + and - between tensors # +################################ +@inline function Base.:+(S1::TT, S2::TT) where {TT <: AllSIMDTensors} + @inbounds begin + D1 = get_data(S1); SV1 = tosimd(D1) + D2 = get_data(S2); SV2 = tosimd(D2) + r = SV1 + SV2 + return get_base(TT)(r) + end +end +@inline function Base.:-(S1::TT, S2::TT) where {TT <: AllSIMDTensors} + @inbounds begin + D1 = get_data(S1); SV1 = tosimd(D1) + D2 = get_data(S2); SV2 = tosimd(D2) + r = SV1 - SV2 + return get_base(TT)(r) + end +end + +########################################## +# (2): * and / between tensor and number # +########################################## +@inline function Base.:*(n::T, S::AllSIMDTensors{T}) where {T <: SIMDTypes} + @inbounds begin + D = get_data(S); SV = tosimd(D) + r = n * SV + return get_base(typeof(S))(r) + end +end +@inline function Base.:*(S::AllSIMDTensors{T}, n::T) where {T <: SIMDTypes} + @inbounds begin + D = get_data(S); SV = tosimd(D) + r = SV * n + return get_base(typeof(S))(r) + end +end +@inline function Base.:/(S::AllSIMDTensors{T}, n::T) where {T <: SIMDTypes} + @inbounds begin + D = get_data(S); SV = tosimd(D) + r = SV / n + return get_base(typeof(S))(r) + end +end + +############ +# (3): dot # +############ +# 2s-1 +@inline LinearAlgebra.dot(S1::SymmetricTensor{2, dim, T}, S2::Vec{dim, T}) where {dim, T <: SIMDTypes} = dot(promote(S1), S2) +# 2-1 +@inline function LinearAlgebra.dot(S1::Tensor{2, 2, T}, S2::Vec{2, T}) where {T <: SIMDTypes} + @inbounds begin + D1 = get_data(S1); D2 = get_data(S2) + SV11 = tosimd(D1, Val{1}, Val{2}) + SV12 = tosimd(D1, Val{3}, Val{4}) + r = muladd(SV12, D2[2], SV11 * D2[1]) + return Tensor{1, 2}(r) + end +end +@inline function LinearAlgebra.dot(S1::Tensor{2, 3, T}, S2::Vec{3, T}) where {T <: SIMDTypes} + @inbounds begin + D1 = get_data(S1); D2 = get_data(S2) + SV11 = tosimd(D1, 
Val{1}, Val{3}) + SV12 = tosimd(D1, Val{4}, Val{6}) + SV13 = tosimd(D1, Val{7}, Val{9}) + r = muladd(SV13, D2[3], muladd(SV12, D2[2], SV11 * D2[1])) + return Tensor{1, 3}(r) + end +end + +# 2s-2 / 2-2s +@inline function LinearAlgebra.dot(S1::AbstractTensor{2, dim, T}, S2::AbstractTensor{2, dim, T}) where {dim, T <: SIMDTypes} + SS1, SS2 = promote_base(S1, S2); dot(SS1, SS2) +end +# 2-2 +@inline function LinearAlgebra.dot(S1::Tensor{2, 2, T}, S2::Tensor{2, 2, T}) where {T <: SIMDTypes} + @inbounds begin + D1 = get_data(S1); D2 = get_data(S2) + SV11 = tosimd(D1, Val{1}, Val{2}) + SV12 = tosimd(D1, Val{3}, Val{4}) + r1 = muladd(SV12, D2[2], SV11 * D2[1]) + r2 = muladd(SV12, D2[4], SV11 * D2[3]) + return Tensor{2, 2}((r1, r2)) + end +end +@inline function LinearAlgebra.dot(S1::Tensor{2, 3, T}, S2::Tensor{2, 3, T}) where {T <: SIMDTypes} + @inbounds begin + D1 = get_data(S1); D2 = get_data(S2) + SV11 = tosimd(D1, Val{1}, Val{3}) + SV12 = tosimd(D1, Val{4}, Val{6}) + SV13 = tosimd(D1, Val{7}, Val{9}) + r1 = muladd(SV13, D2[3], muladd(SV12, D2[2], SV11 * D2[1])) + r2 = muladd(SV13, D2[6], muladd(SV12, D2[5], SV11 * D2[4])) + r3 = muladd(SV13, D2[9], muladd(SV12, D2[8], SV11 * D2[7])) + return Tensor{2, 3}((r1, r2, r3)) + end +end + +################## +# (4): dcontract # +################## +# 2s-2 / 2-2s +@inline function dcontract(S1::AbstractTensor{2, dim, T}, S2::AbstractTensor{2, dim, T}) where {dim, T <: SIMDTypes} + SS1, SS2 = promote_base(S1, S2); dcontract(SS1, SS2) +end +# 2-2 +@inline function dcontract(S1::Tensor{2, dim, T}, S2::Tensor{2, dim, T}) where {dim, T <: SIMDTypes} + SV1 = tosimd(get_data(S1)) + SV2 = tosimd(get_data(S2)) + r = SV1 * SV2 + return sum(r) +end +# 2s-2s +@generated function dcontract(S1::SymmetricTensor{2, dim, T}, S2::SymmetricTensor{2, dim, T}) where {dim, T <: SIMDTypes} + F = symmetric_factors(2, dim, T) + return quote + $(Expr(:meta, :inline)) + F = $F + SV1 = tosimd(get_data(S1)) + SV2 = tosimd(get_data(S2)) + SV1SV2 = SV1 * SV2; 
FSV1SV2 = F * SV1SV2 + return sum(FSV1SV2) + end +end + +# 4-2s +@inline dcontract(S1::Tensor{4, 3, T}, S2::SymmetricTensor{2, 3, T}) where {T <: SIMDTypes} = dcontract(S1, promote(S2)) +# 4-2 +@inline function dcontract(S1::Tensor{4, 2, T}, S2::Tensor{2, 2, T}) where {T <: SIMDTypes} + @inbounds begin + D1 = get_data(S1); D2 = get_data(S2) + SV11 = tosimd(D1, Val{1}, Val{4}) + SV12 = tosimd(D1, Val{5}, Val{8}) + SV13 = tosimd(D1, Val{9}, Val{12}) + SV14 = tosimd(D1, Val{13}, Val{16}) + r = muladd(SV14, D2[4], muladd(SV13, D2[3], muladd(SV12, D2[2], SV11 * D2[1]))) + return Tensor{2, 2}(r) + end +end +@inline function dcontract(S1::Tensor{4, 3, T}, S2::Tensor{2, 3, T}) where {T <: SIMDTypes} + @inbounds begin + D1 = get_data(S1); D2 = get_data(S2) + SV11 = tosimd(D1, Val{1}, Val{9}) + SV12 = tosimd(D1, Val{10}, Val{18}) + SV13 = tosimd(D1, Val{19}, Val{27}) + SV14 = tosimd(D1, Val{28}, Val{36}) + SV15 = tosimd(D1, Val{37}, Val{45}) + SV16 = tosimd(D1, Val{46}, Val{54}) + SV17 = tosimd(D1, Val{55}, Val{63}) + SV18 = tosimd(D1, Val{64}, Val{72}) + SV19 = tosimd(D1, Val{73}, Val{81}) + r = muladd(SV19, D2[9], muladd(SV18, D2[8], muladd(SV17, D2[7], muladd(SV16, D2[6], muladd(SV15, D2[5], muladd(SV14, D2[4], muladd(SV13, D2[3], muladd(SV12, D2[2], SV11 * D2[1])))))))) + return Tensor{2, 3}(r) + end +end +@inline function dcontract(S1::SymmetricTensor{4, 3, T}, S2::SymmetricTensor{2, 3, T}) where {T <: SIMDTypes} + @inbounds begin + D1 = get_data(S1); D2 = get_data(S2) + SV11 = tosimd(D1, Val{1}, Val{6}) + SV12 = tosimd(D1, Val{7}, Val{12}) + SV13 = tosimd(D1, Val{13}, Val{18}) + SV14 = tosimd(D1, Val{19}, Val{24}) + SV15 = tosimd(D1, Val{25}, Val{30}) + SV16 = tosimd(D1, Val{31}, Val{36}) + D21 = D2[1]; D22 = D2[2] * T(2); D23 = D2[3] * T(2) + D24 = D2[4]; D25 = D2[5] * T(2); D26 = D2[6] + r = muladd(SV16, D26, muladd(SV15, D25, muladd(SV14, D24, muladd(SV13, D23, muladd(SV12, D22, SV11 * D21))))) + return SymmetricTensor{2, 3}(r) + end +end + +# 4s-4 / 4-4s +@inline 
function dcontract(S1::AbstractTensor{4, dim, T}, S2::AbstractTensor{4, dim, T}) where {dim, T <: SIMDTypes} + SS1, SS2 = promote_base(S1, S2); dcontract(SS1, SS2) +end +# 4-4 +@inline function dcontract(S1::Tensor{4, 2, T}, S2::Tensor{4, 2, T}) where {T <: SIMDTypes} + @inbounds begin + D1 = get_data(S1); D2 = get_data(S2) + SV11 = tosimd(D1, Val{1}, Val{4}) + SV12 = tosimd(D1, Val{5}, Val{8}) + SV13 = tosimd(D1, Val{9}, Val{12}) + SV14 = tosimd(D1, Val{13}, Val{16}) + r1 = muladd(SV14, D2[4], muladd(SV13, D2[3], muladd(SV12, D2[2], SV11 * D2[1]))) + r2 = muladd(SV14, D2[8], muladd(SV13, D2[7], muladd(SV12, D2[6], SV11 * D2[5]))) + r3 = muladd(SV14, D2[12], muladd(SV13, D2[11], muladd(SV12, D2[10], SV11 * D2[9]))) + r4 = muladd(SV14, D2[16], muladd(SV13, D2[15], muladd(SV12, D2[14], SV11 * D2[13]))) + return Tensor{4, 2}((r1, r2, r3, r4)) + end +end +function dcontract(S1::Tensor{4, 3, T}, S2::Tensor{4, 3, T}) where {T <: SIMDTypes} + @inbounds begin + D1 = get_data(S1); D2 = get_data(S2) + SV11 = tosimd(D1, Val{1}, Val{9}) + SV12 = tosimd(D1, Val{10}, Val{18}) + SV13 = tosimd(D1, Val{19}, Val{27}) + SV14 = tosimd(D1, Val{28}, Val{36}) + SV15 = tosimd(D1, Val{37}, Val{45}) + SV16 = tosimd(D1, Val{46}, Val{54}) + SV17 = tosimd(D1, Val{55}, Val{63}) + SV18 = tosimd(D1, Val{64}, Val{72}) + SV19 = tosimd(D1, Val{73}, Val{81}) + r1 = muladd(SV19, D2[9], muladd(SV18, D2[8], muladd(SV17, D2[7], muladd(SV16, D2[6], muladd(SV15, D2[5], muladd(SV14, D2[4], muladd(SV13, D2[3], muladd(SV12, D2[2], SV11 * D2[1] )))))))) + r2 = muladd(SV19, D2[18], muladd(SV18, D2[17], muladd(SV17, D2[16], muladd(SV16, D2[15], muladd(SV15, D2[14], muladd(SV14, D2[13], muladd(SV13, D2[12], muladd(SV12, D2[11], SV11 * D2[10])))))))) + r3 = muladd(SV19, D2[27], muladd(SV18, D2[26], muladd(SV17, D2[25], muladd(SV16, D2[24], muladd(SV15, D2[23], muladd(SV14, D2[22], muladd(SV13, D2[21], muladd(SV12, D2[20], SV11 * D2[19])))))))) + r4 = muladd(SV19, D2[36], muladd(SV18, D2[35], muladd(SV17, D2[34], 
muladd(SV16, D2[33], muladd(SV15, D2[32], muladd(SV14, D2[31], muladd(SV13, D2[30], muladd(SV12, D2[29], SV11 * D2[28])))))))) + r5 = muladd(SV19, D2[45], muladd(SV18, D2[44], muladd(SV17, D2[43], muladd(SV16, D2[42], muladd(SV15, D2[41], muladd(SV14, D2[40], muladd(SV13, D2[39], muladd(SV12, D2[38], SV11 * D2[37])))))))) + r6 = muladd(SV19, D2[54], muladd(SV18, D2[53], muladd(SV17, D2[52], muladd(SV16, D2[51], muladd(SV15, D2[50], muladd(SV14, D2[49], muladd(SV13, D2[48], muladd(SV12, D2[47], SV11 * D2[46])))))))) + r7 = muladd(SV19, D2[63], muladd(SV18, D2[62], muladd(SV17, D2[61], muladd(SV16, D2[60], muladd(SV15, D2[59], muladd(SV14, D2[58], muladd(SV13, D2[57], muladd(SV12, D2[56], SV11 * D2[55])))))))) + r8 = muladd(SV19, D2[72], muladd(SV18, D2[71], muladd(SV17, D2[70], muladd(SV16, D2[69], muladd(SV15, D2[68], muladd(SV14, D2[67], muladd(SV13, D2[66], muladd(SV12, D2[65], SV11 * D2[64])))))))) + r9 = muladd(SV19, D2[81], muladd(SV18, D2[80], muladd(SV17, D2[79], muladd(SV16, D2[78], muladd(SV15, D2[77], muladd(SV14, D2[76], muladd(SV13, D2[75], muladd(SV12, D2[74], SV11 * D2[73])))))))) + return Tensor{4, 3}((r1, r2, r3, r4, r5, r6, r7, r8, r9)) + end +end +@inline function dcontract(S1::SymmetricTensor{4, 3, T}, S2::Tensor{2, 3, T}) where {T <: SIMDTypes} + @inbounds begin + D1 = get_data(S1); D2 = get_data(S2) + SV11 = tosimd(D1, Val{1}, Val{6}) + SV12 = tosimd(D1, Val{7}, Val{12}) + SV13 = tosimd(D1, Val{13}, Val{18}) + SV14 = tosimd(D1, Val{19}, Val{24}) + SV15 = tosimd(D1, Val{25}, Val{30}) + SV16 = tosimd(D1, Val{31}, Val{36}) + D21 = D2[1]; D22 = D2[2] + D2[4]; D23 = D2[3] + D2[7] + D24 = D2[5]; D25 = D2[6] + D2[8]; D26 = D2[9] + r = muladd(SV16, D26, muladd(SV15, D25, muladd(SV14, D24, muladd(SV13, D23, muladd(SV12, D22, SV11 * D21))))) + return SymmetricTensor{2, 3}(r) + end +end +@inline function dcontract(S1::SymmetricTensor{4, 2, T}, S2::SymmetricTensor{4, 2, T}) where {T <: SIMDTypes} + @inbounds begin + D1 = get_data(S1); D2 = get_data(S2) + 
SV11 = tosimd(D1, Val{1}, Val{3}) + SV12 = tosimd(D1, Val{4}, Val{6}) + SV13 = tosimd(D1, Val{7}, Val{9}) + D21 = D2[1]; D22 = D2[2] * T(2); D23 = D2[3] + D24 = D2[4]; D25 = D2[5] * T(2); D26 = D2[6] + D27 = D2[7]; D28 = D2[8] * T(2); D29 = D2[9] + r1 = muladd(SV13, D23, muladd(SV12, D22, SV11 * D21)) + r2 = muladd(SV13, D26, muladd(SV12, D25, SV11 * D24)) + r3 = muladd(SV13, D29, muladd(SV12, D28, SV11 * D27)) + return SymmetricTensor{4, 2}((r1, r2, r3)) + end +end +function dcontract(S1::SymmetricTensor{4, 3, T}, S2::SymmetricTensor{4, 3, T}) where {T <: SIMDTypes} + @inbounds begin + D1 = get_data(S1); D2 = get_data(S2) + SV11 = tosimd(D1, Val{1}, Val{6}) + SV12 = tosimd(D1, Val{7}, Val{12}) + SV13 = tosimd(D1, Val{13}, Val{18}) + SV14 = tosimd(D1, Val{19}, Val{24}) + SV15 = tosimd(D1, Val{25}, Val{30}) + SV16 = tosimd(D1, Val{31}, Val{36}) + D21 = D2[1]; D22 = D2[2] * T(2); D23 = D2[3] * T(2); D24 = D2[4]; D25 = D2[5] * T(2); D26 = D2[6] + D27 = D2[7]; D28 = D2[8] * T(2); D29 = D2[9] * T(2); D210 = D2[10]; D211 = D2[11] * T(2); D212 = D2[12] + D213 = D2[13]; D214 = D2[14] * T(2); D215 = D2[15] * T(2); D216 = D2[16]; D217 = D2[17] * T(2); D218 = D2[18] + D219 = D2[19]; D220 = D2[20] * T(2); D221 = D2[21] * T(2); D222 = D2[22]; D223 = D2[23] * T(2); D224 = D2[24] + D225 = D2[25]; D226 = D2[26] * T(2); D227 = D2[27] * T(2); D228 = D2[28]; D229 = D2[29] * T(2); D230 = D2[30] + D231 = D2[31]; D232 = D2[32] * T(2); D233 = D2[33] * T(2); D234 = D2[34]; D235 = D2[35] * T(2); D236 = D2[36] + r1 = muladd(SV16, D26, muladd(SV15, D25, muladd(SV14, D24, muladd(SV13, D23, muladd(SV12, D22, SV11 * D21 ))))) + r2 = muladd(SV16, D212, muladd(SV15, D211, muladd(SV14, D210, muladd(SV13, D29, muladd(SV12, D28, SV11 * D27 ))))) + r3 = muladd(SV16, D218, muladd(SV15, D217, muladd(SV14, D216, muladd(SV13, D215, muladd(SV12, D214, SV11 * D213))))) + r4 = muladd(SV16, D224, muladd(SV15, D223, muladd(SV14, D222, muladd(SV13, D221, muladd(SV12, D220, SV11 * D219))))) + r5 = muladd(SV16, 
D230, muladd(SV15, D229, muladd(SV14, D228, muladd(SV13, D227, muladd(SV12, D226, SV11 * D225))))) + r6 = muladd(SV16, D236, muladd(SV15, D235, muladd(SV14, D234, muladd(SV13, D233, muladd(SV12, D232, SV11 * D231))))) + return SymmetricTensor{4, 3}((r1, r2, r3, r4, r5, r6)) + end +end + +############### +# (5): otimes # +############### +# 1-1 +@inline function otimes(S1::Vec{2, T}, S2::Vec{2, T}) where {T <: SIMDTypes} + @inbounds begin + D1 = get_data(S1); D2 = get_data(S2) + SV1 = tosimd(D1) + r1 = SV1 * D2[1] + r2 = SV1 * D2[2] + return Tensor{2, 2}((r1, r2)) + end +end +@inline function otimes(S1::Vec{3, T}, S2::Vec{3, T}) where {T <: SIMDTypes} + @inbounds begin + D1 = get_data(S1); D2 = get_data(S2) + SV1 = tosimd(D1) + r1 = SV1 * D2[1]; r2 = SV1 * D2[2]; r3 = SV1 * D2[3] + return Tensor{2, 3}((r1, r2, r3)) + end +end + +# 2s-2 / 2-2s +@inline function otimes(S1::AbstractTensor{2, dim, T}, S2::AbstractTensor{2, dim, T}) where {dim, T <: SIMDTypes} + SS1, SS2 = promote_base(S1, S2); otimes(SS1, SS2) +end + +# 2-2 +@inline function otimes(S1::Tensor{2, 2, T}, S2::Tensor{2, 2, T}) where {T <: SIMDTypes} + @inbounds begin + D1 = get_data(S1); D2 = get_data(S2) + SV1 = tosimd(D1) + r1 = SV1 * D2[1]; r2 = SV1 * D2[2]; r3 = SV1 * D2[3]; r4 = SV1 * D2[4] + return Tensor{4, 2}((r1, r2, r3, r4)) + end +end +@inline function otimes(S1::Tensor{2, 3, T}, S2::Tensor{2, 3, T}) where {T <: SIMDTypes} + @inbounds begin + D1 = get_data(S1); D2 = get_data(S2) + SV1 = tosimd(D1) + r1 = SV1 * D2[1]; r2 = SV1 * D2[2]; r3 = SV1 * D2[3] + r4 = SV1 * D2[4]; r5 = SV1 * D2[5]; r6 = SV1 * D2[6] + r7 = SV1 * D2[7]; r8 = SV1 * D2[8]; r9 = SV1 * D2[9] + return Tensor{4, 3}((r1, r2, r3, r4, r5, r6, r7, r8, r9)) + end +end + +# 2s-2s +@inline function otimes(S1::SymmetricTensor{2, 2, T}, S2::SymmetricTensor{2, 2, T}) where {T <: SIMDTypes} + @inbounds begin + D1 = get_data(S1); D2 = get_data(S2) + SV1 = tosimd(D1) + r1 = SV1 * D2[1]; r2 = SV1 * D2[2]; r3 = SV1 * D2[3] + return 
SymmetricTensor{4, 2}((r1, r2, r3)) + end +end +@inline function otimes(S1::SymmetricTensor{2, 3, T}, S2::SymmetricTensor{2, 3, T}) where {T <: SIMDTypes} + @inbounds begin + D1 = get_data(S1); D2 = get_data(S2) + SV1 = tosimd(D1) + r1 = SV1 * D2[1]; r2 = SV1 * D2[2]; r3 = SV1 * D2[3] + r4 = SV1 * D2[4]; r5 = SV1 * D2[5]; r6 = SV1 * D2[6] + return SymmetricTensor{4, 3}((r1, r2, r3, r4, r5, r6)) + end +end + +############# +# (6): norm # +############# +# order 1 and order 2 norms rely on dot and dcontract respectively +@inline function LinearAlgebra.norm(S::Tensor{4, dim, T}) where {dim, T <: SIMDTypes} + @inbounds begin + SV = tosimd(get_data(S)) + SVSV = SV * SV + r = sum(SVSV) + return sqrt(r) + end +end +@generated function LinearAlgebra.norm(S::SymmetricTensor{4, dim, T}) where {dim, T <: SIMDTypes} + F = symmetric_factors(4, dim, T) + return quote + $(Expr(:meta, :inline)) + F = $F + SV = tosimd(get_data(S)) + SVSV = SV * SV; FSVSV = F * SVSV; r = sum(FSVSV) + return sqrt(r) + end +end diff --git a/src/special_ops.jl b/src/special_ops.jl index 1fda522b..2dd290d8 100644 --- a/src/special_ops.jl +++ b/src/special_ops.jl @@ -1,23 +1,23 @@ -# specialized methods -""" - dotdot(::Vec, ::SymmetricFourthOrderTensor, ::Vec) - -Computes a special dot product between two vectors and a symmetric fourth order tensor -such that ``a_k C_{ikjl} b_l``. 
-""" -@generated function dotdot(v1::Vec{dim}, S::SymmetricTensor{4, dim}, v2::Vec{dim}) where {dim} - idx(i,j,k,l) = compute_index(SymmetricTensor{4, dim}, i, j, k, l) - exps = Expr(:tuple) - for j in 1:dim, i in 1:dim - ex1, ex2 = Expr[],Expr[] - for l in 1:dim, k in 1:dim - push!(ex1, :(get_data(v1)[$k] * get_data(S)[$(idx(i,k,j,l))])) - push!(ex2, :(get_data(v2)[$l])) - end - push!(exps.args, reducer(ex1, ex2, true)) - end - return quote - $(Expr(:meta, :inline)) - @inbounds return Tensor{2, dim}($exps) - end -end +# specialized methods +""" + dotdot(::Vec, ::SymmetricFourthOrderTensor, ::Vec) + +Computes a special dot product between two vectors and a symmetric fourth order tensor +such that ``a_k C_{ikjl} b_l``. +""" +@generated function dotdot(v1::Vec{dim}, S::SymmetricTensor{4, dim}, v2::Vec{dim}) where {dim} + idx(i,j,k,l) = compute_index(SymmetricTensor{4, dim}, i, j, k, l) + exps = Expr(:tuple) + for j in 1:dim, i in 1:dim + ex1, ex2 = Expr[],Expr[] + for l in 1:dim, k in 1:dim + push!(ex1, :(get_data(v1)[$k] * get_data(S)[$(idx(i,k,j,l))])) + push!(ex2, :(get_data(v2)[$l])) + end + push!(exps.args, reducer(ex1, ex2, true)) + end + return quote + $(Expr(:meta, :inline)) + @inbounds return Tensor{2, dim}($exps) + end +end diff --git a/src/symmetric.jl b/src/symmetric.jl index cfed4883..bd49498e 100644 --- a/src/symmetric.jl +++ b/src/symmetric.jl @@ -1,115 +1,115 @@ -# symmetric, skew-symmetric and symmetric checks -""" - symmetric(::SecondOrderTensor) - symmetric(::FourthOrderTensor) - -Computes the (minor) symmetric part of a second or fourth order tensor. -Return a `SymmetricTensor`. 
- -# Examples -```jldoctest -julia> A = rand(Tensor{2,2}) -2×2 Tensor{2, 2, Float64, 4}: - 0.590845 0.566237 - 0.766797 0.460085 - -julia> symmetric(A) -2×2 SymmetricTensor{2, 2, Float64, 3}: - 0.590845 0.666517 - 0.666517 0.460085 -``` -""" -@inline symmetric(S1::SymmetricTensors) = S1 - -@inline function symmetric(S::Tensor{2, dim}) where {dim} - SymmetricTensor{2, dim}(@inline function(i, j) @inbounds i == j ? S[i,j] : (S[i,j] + S[j,i]) / 2 end) -end - - - -""" - minorsymmetric(::FourthOrderTensor) - -Compute the minor symmetric part of a fourth order tensor, return a `SymmetricTensor{4}`. -""" -@inline function minorsymmetric(S::Tensor{4, dim}) where {dim} - SymmetricTensor{4, dim}( - @inline function(i, j, k, l) - @inbounds if i == j && k == l - return S[i,j,k,l] - else - return (S[i,j,k,l] + S[j,i,k,l] + S[i,j,l,k] + S[j,i,l,k]) / 4 - end - end - ) -end - -@inline minorsymmetric(S::SymmetricTensors) = S - -@inline symmetric(S::Tensor{4}) = minorsymmetric(S) - -""" - majorsymmetric(::FourthOrderTensor) - -Compute the major symmetric part of a fourth order tensor. -""" -@inline function majorsymmetric(S::T) where {T <: FourthOrderTensor} - get_base(T)( - @inline function(i, j, k, l) - @inbounds if i == j == k == l || i == k && j == l - return S[i,j,k,l] - else - return (S[i,j,k,l] + S[k,l,i,j]) / 2 - end - end - ) -end - -# Unsafe versions that just truncates -@inline unsafe_symmetric(S::SymmetricTensors) = S -@inline function unsafe_symmetric(S::Tensor{2, dim}) where {dim} - SymmetricTensor{2, dim}(@inline function(i, j) @inbounds S[i,j] end) -end -@inline function unsafe_symmetric(S::Tensor{4, dim}) where {dim} - SymmetricTensor{4, dim}(@inline function(i, j, k, l) @inbounds return S[i,j,k,l] end) -end - -""" - skew(::SecondOrderTensor) - -Computes the skew-symmetric (anti-symmetric) part of a second order tensor, returns a `Tensor{2}`. 
-""" -@inline skew(S1::Tensor{2}) = (S1 - S1') / 2 -@inline skew(S1::SymmetricTensor{2,dim,T}) where {dim, T} = zero(Tensor{2,dim,T}) - -# Symmetry checks -@inline LinearAlgebra.issymmetric(t::Tensor{2, 1}) = true -@inline LinearAlgebra.issymmetric(t::Tensor{2, 2}) = @inbounds t[1,2] == t[2,1] - -@inline function LinearAlgebra.issymmetric(t::Tensor{2, 3}) - return @inbounds t[1,2] == t[2,1] && t[1,3] == t[3,1] && t[2,3] == t[3,2] -end - -function isminorsymmetric(t::Tensor{4, dim}) where {dim} - @inbounds for l in 1:dim, k in l:dim, j in 1:dim, i in j:dim - if t[i,j,k,l] != t[j,i,k,l] || t[i,j,k,l] != t[i,j,l,k] - return false - end - end - return true -end - -isminorsymmetric(::SymmetricTensor{4}) = true - -function ismajorsymmetric(t::FourthOrderTensor{dim}) where {dim} - @inbounds for l in 1:dim, k in l:dim, j in 1:dim, i in j:dim - if t[i,j,k,l] != t[k,l,i,j] - return false - end - end - return true -end - -LinearAlgebra.issymmetric(t::Tensor{4}) = isminorsymmetric(t) - -LinearAlgebra.issymmetric(::SymmetricTensors) = true +# symmetric, skew-symmetric and symmetric checks +""" + symmetric(::SecondOrderTensor) + symmetric(::FourthOrderTensor) + +Computes the (minor) symmetric part of a second or fourth order tensor. +Return a `SymmetricTensor`. + +# Examples +```jldoctest +julia> A = rand(Tensor{2,2}) +2×2 Tensor{2, 2, Float64, 4}: + 0.590845 0.566237 + 0.766797 0.460085 + +julia> symmetric(A) +2×2 SymmetricTensor{2, 2, Float64, 3}: + 0.590845 0.666517 + 0.666517 0.460085 +``` +""" +@inline symmetric(S1::SymmetricTensors) = S1 + +@inline function symmetric(S::Tensor{2, dim}) where {dim} + SymmetricTensor{2, dim}(@inline function(i, j) @inbounds i == j ? S[i,j] : (S[i,j] + S[j,i]) / 2 end) +end + + + +""" + minorsymmetric(::FourthOrderTensor) + +Compute the minor symmetric part of a fourth order tensor, return a `SymmetricTensor{4}`. 
+""" +@inline function minorsymmetric(S::Tensor{4, dim}) where {dim} + SymmetricTensor{4, dim}( + @inline function(i, j, k, l) + @inbounds if i == j && k == l + return S[i,j,k,l] + else + return (S[i,j,k,l] + S[j,i,k,l] + S[i,j,l,k] + S[j,i,l,k]) / 4 + end + end + ) +end + +@inline minorsymmetric(S::SymmetricTensors) = S + +@inline symmetric(S::Tensor{4}) = minorsymmetric(S) + +""" + majorsymmetric(::FourthOrderTensor) + +Compute the major symmetric part of a fourth order tensor. +""" +@inline function majorsymmetric(S::T) where {T <: FourthOrderTensor} + get_base(T)( + @inline function(i, j, k, l) + @inbounds if i == j == k == l || i == k && j == l + return S[i,j,k,l] + else + return (S[i,j,k,l] + S[k,l,i,j]) / 2 + end + end + ) +end + +# Unsafe versions that just truncates +@inline unsafe_symmetric(S::SymmetricTensors) = S +@inline function unsafe_symmetric(S::Tensor{2, dim}) where {dim} + SymmetricTensor{2, dim}(@inline function(i, j) @inbounds S[i,j] end) +end +@inline function unsafe_symmetric(S::Tensor{4, dim}) where {dim} + SymmetricTensor{4, dim}(@inline function(i, j, k, l) @inbounds return S[i,j,k,l] end) +end + +""" + skew(::SecondOrderTensor) + +Computes the skew-symmetric (anti-symmetric) part of a second order tensor, returns a `Tensor{2}`. 
+""" +@inline skew(S1::Tensor{2}) = (S1 - S1') / 2 +@inline skew(S1::SymmetricTensor{2,dim,T}) where {dim, T} = zero(Tensor{2,dim,T}) + +# Symmetry checks +@inline LinearAlgebra.issymmetric(t::Tensor{2, 1}) = true +@inline LinearAlgebra.issymmetric(t::Tensor{2, 2}) = @inbounds t[1,2] == t[2,1] + +@inline function LinearAlgebra.issymmetric(t::Tensor{2, 3}) + return @inbounds t[1,2] == t[2,1] && t[1,3] == t[3,1] && t[2,3] == t[3,2] +end + +function isminorsymmetric(t::Tensor{4, dim}) where {dim} + @inbounds for l in 1:dim, k in l:dim, j in 1:dim, i in j:dim + if t[i,j,k,l] != t[j,i,k,l] || t[i,j,k,l] != t[i,j,l,k] + return false + end + end + return true +end + +isminorsymmetric(::SymmetricTensor{4}) = true + +function ismajorsymmetric(t::FourthOrderTensor{dim}) where {dim} + @inbounds for l in 1:dim, k in l:dim, j in 1:dim, i in j:dim + if t[i,j,k,l] != t[k,l,i,j] + return false + end + end + return true +end + +LinearAlgebra.issymmetric(t::Tensor{4}) = isminorsymmetric(t) + +LinearAlgebra.issymmetric(::SymmetricTensors) = true diff --git a/src/tensor_ops_errors.jl b/src/tensor_ops_errors.jl index fb091c1f..d3aa1d54 100644 --- a/src/tensor_ops_errors.jl +++ b/src/tensor_ops_errors.jl @@ -1,10 +1,10 @@ -# Remove `*` as infix operator between tensors -function Base.:*(S1::AbstractTensor, S2::AbstractTensor) - error("use `⋅` (`\\cdot`) for single contraction and `⊡` (`\\boxdot`) for double contraction instead of `*`") -end - -for f in (:transpose, :adjoint) - @eval function LinearAlgebra.$f(::Vec) - throw(ArgumentError("the (no-op) $($f) is discontinued for `Tensors.Vec`")) - end -end +# Remove `*` as infix operator between tensors +function Base.:*(S1::AbstractTensor, S2::AbstractTensor) + error("use `⋅` (`\\cdot`) for single contraction and `⊡` (`\\boxdot`) for double contraction instead of `*`") +end + +for f in (:transpose, :adjoint) + @eval function LinearAlgebra.$f(::Vec) + throw(ArgumentError("the (no-op) $($f) is discontinued for `Tensors.Vec`")) + end +end 
diff --git a/src/tensor_product_macro.jl b/src/tensor_product_macro.jl new file mode 100644 index 00000000..5a853a1b --- /dev/null +++ b/src/tensor_product_macro.jl @@ -0,0 +1,416 @@ +# This file defines the following macros +# * @tensor_product: Produce efficient unrolled code from index expression +# * @foreach: macro-loop +# +# The main "workhorse" is the `get_expression` function +# +# First, `get_expression` gives the logic for generating the expression. +# Then, the `tensor_product` macro interprets a convenient input format +# to return a function with the expression from `get_expression`. +# +# ====================================================================================== +# `get_expression` +# ====================================================================================== +const IndexSymbols{N} = NTuple{N, Symbol} + +# Given an index `name` and the `names` corresponding to the `indices`, +# return the index for `name`. +function find_index(name::Symbol, names::IndexSymbols, indices::NTuple{<:Any, Int}) + i = findfirst(Base.Fix1(===, name), names) + i !== nothing && return indices[i] + error("Could not find $name in si=$names") +end + +# Same as above, but if not found in the first `names1`, then try to find in `names2`. +# `isdisjoint(names1, names2)` should be true when calling this function. +function find_index(name::Symbol, names1::IndexSymbols, indices1::NTuple{<:Any, Int}, names2::IndexSymbols, indices2::NTuple{<:Any, Int}) + i = findfirst(Base.Fix1(===, name), names1) + i !== nothing && return indices1[i] + i = findfirst(Base.Fix1(===, name), names2) + i !== nothing && return indices2[i] + error("Could not find $name in names1=$names1 or names2=$names2") +end + +# Return the tensor base, e.g. `Tensor{order, dim}` based on the `name` and which +# indices belong to the tensor. 
+function get_tensor_type(name::Symbol, index_names::IndexSymbols, dims::NamedTuple) + if name === :Tensor || name === :SymmetricTensor + return getproperty(Tensors, name){length(index_names), first(dims)} + else + error("MixedTensor not yet supported") + end +end + +""" + get_expression(ci::IndexSymbols, ai::IndexSymbols, bi::IndexSymbols, dims::NamedTuple; + TA::Symbol, TB::Symbol, TC::Symbol, use_muladd::Bool=false) + +`ci` gives the output indices, ai the indices of the first tensor and bi of the second. +`dims` describe the dimension for each index name, and may be provided as `Int` if +all are equal (i.e. not MixedTensor). + +**Examples** to get the expression for the following with `dim=2` and standard `Tensor` +inputs and outputs + +* `C = A[i]*B[i]`: `get_expression((), (:i,), (:i,), 2; TA = :Tensor, TB = :Tensor)` +* `C[i] = A[i,j]*B[j]`: `get_expression((:i,), (:i, :j), (:j,), 2; TA = :Tensor, TB = :Tensor, TC = :Tensor)` +* `C[i,j] = A[i,l,m]*B[l,m,j]`: `get_expression((:i, :j), (:i, :l, :m), (:l, :m, :j), 2; TA = :Tensor, TB = :Tensor, TC = :Tensor)` + +""" +function get_expression(ci::IndexSymbols, ai::IndexSymbols, bi::IndexSymbols, + dims::NamedTuple; TC, TA, TB, use_muladd=false + ) + # Convert type to actual Type + TTA = get_tensor_type(TA, ai, dims) + TTB = get_tensor_type(TB, bi, dims) + TTC = get_tensor_type(TC, ci, dims) + + idxA(args...) = compute_index(TTA, args...) + idxB(args...) = compute_index(TTB, args...) + + sum_keys = tuple(sort(intersect(ai, bi))...) 
# The index names to sum over + + # Validate input + issubset(ci, union(ai, bi)) || error("All indices in ci must in either ai or bi") + isdisjoint(sum_keys, ci) || error("Indices in ci cannot only exist once in union(ai, bi)") + if length(ci) != (length(ai) + length(bi) - 2*length(sum_keys)) + error("Some indices occurs more than once in ai or bi, summation indices should occur once in ai and once in bi") + end + + exps = Expr(:tuple) + for cinds in Iterators.ProductIterator(tuple((1:dims[k] for k in ci)...)) + exa = Expr[] + exb = Expr[] + for sinds in Iterators.ProductIterator(tuple((1:dims[k] for k in sum_keys)...)) + ainds = tuple((find_index(a, ci, cinds, sum_keys, sinds) for a in ai)...) + binds = tuple((find_index(b, ci, cinds, sum_keys, sinds) for b in bi)...) + push!(exa, :(get_data(A)[$(idxA(ainds...))])) + push!(exb, :(get_data(B)[$(idxB(binds...))])) + end + push!(exps.args, reducer(exa, exb, use_muladd)) + end + return :($TTC($(remove_duplicates(TTC, exps)))) +end + +function get_expression(ci, ai::IndexSymbols, bi::IndexSymbols, dim::Int; kwargs...) + dims = NamedTuple(k=>dim for k in union(ai, bi)) # Convert scalar dim to one dim for each index. + return get_expression(ci, ai, bi, dims; kwargs...) +end + +# For scalar output +function get_expression(::Tuple{}, ai::IndexSymbols, bi::IndexSymbols, + dims::NamedTuple; TC::Nothing=nothing, TA, TB, use_muladd=false + ) + # Convert type to actual Type + TTA = get_tensor_type(TA, ai, dims) + TTB = get_tensor_type(TB, bi, dims) + + idxA(args...) = compute_index(TTA, args...) + idxB(args...) = compute_index(TTB, args...) + + sum_keys = tuple(sort(intersect(ai, bi))...) # The index names to sum over + if !(length(sum_keys) == length(ai) == length(bi)) + error("For scalar output, all indices in ai must be in bi, and vice versa") + end + + exa = Expr[] + exb = Expr[] + for sinds in Iterators.ProductIterator(tuple((1:dims[k] for k in sum_keys)...)) + ainds = tuple((find_index(a, sum_keys, sinds) for a in ai)...) 
+ binds = tuple((find_index(b, sum_keys, sinds) for b in bi)...) + push!(exa, :(get_data(A)[$(idxA(ainds...))])) + push!(exb, :(get_data(B)[$(idxB(binds...))])) + end + return reducer(exa, exb, use_muladd) +end + +# ====================================================================================== +# `@tensor_product` +# ====================================================================================== + +# Information extracted about each argument in the function header +struct ArgInfo + name::Symbol + type::Symbol + order::Int + dim::Union{Int,Tuple{<:Any,Int}} +end +ArgInfo(;name, type, order, dim) = ArgInfo(name, type, order, dim) + +# Information extracted about term in the index expression in the function body +struct TermInfo{N} + name::Symbol + inds::IndexSymbols{N} +end + +TermInfo(term::Symbol) = TermInfo(term, ()) +function TermInfo(term::Expr) + term.head === :ref || error("TermInfo requires expression with head=:ref, but head = ", term.head) + name = term.args[1] + inds = tuple(term.args[2:end]...) + return TermInfo(name, inds) +end + +function extract_arginfo(arg_expr::Expr) + if arg_expr.head !== :(::) + error("Expected type specification arg_expr, but got expr with head $(arg_expr.head)") + end + @assert length(arg_expr.args) == 2 + name = arg_expr.args[1] + @assert name isa Symbol + curly = arg_expr.args[2] + @assert curly.head === :curly + type = curly.args[1] + if type ∉ (:Tensor, :SymmetricTensor, #=MixedTensor=#) + error("type = $type was unexpected") + end + order = curly.args[2]::Int # Type-assert required, if user pass e.g. Tensor{2,dim}, + dim = curly.args[3]::Int # the macro will not work, need to pass explicit numbers in the Expr. 
+ isa(dim, Int) || error("dim = $dim was unexpected, curlyargs = $(curly.args)") + return ArgInfo(;name, type, order, dim) +end + +function extract_header_arginfos(header::Expr) + header.head === :call || error("header expression with head=$(header.head) is not supported") + Ainfo = extract_arginfo(header.args[2]) + Binfo = extract_arginfo(header.args[3]) + return Ainfo, Binfo +end + +function extract_body_information(body::Expr) + body.head === :block || error("Expecting a block type expression") + @assert all(isa(a, LineNumberNode) for a in body.args[1:(length(body.args)-1)]) + @assert body.args[end].head === :(=) # Should be an assignment + expr = body.args[end].args + Cinfo = TermInfo(expr[1]) + @assert expr[2].head === :call + @assert expr[2].args[1] === :* + Ainfo = TermInfo(expr[2].args[2]) + Binfo = TermInfo(expr[2].args[3]) + return Cinfo, Ainfo, Binfo +end + +function check_arg_expr_consistency(head, body) + head.name === body.name || error("head ($head) and body ($body) variable names don't match") + if length(body.inds) !== head.order + ninds = length(body.inds) + error("head for $(head.name) specifices tensor of order $(head.order), but index expression has only $ninds ($(body.inds))") + end +end + +function check_index_consistency(::Tuple{}, ai::IndexSymbols, bi::IndexSymbols) + rhs_inds = (ai..., bi...) + if !all(count(k==l for l in rhs_inds) == 2 for k in rhs_inds) + error("All indices must occur exactly twice on the right-hand side for scalar output") + end +end + +function check_index_consistency(ci::IndexSymbols, ai::IndexSymbols, bi::IndexSymbols) + rhs_inds = (ai..., bi...) + # Check that each index occurs only exactly once or twice + if !all(count(k==l for l in rhs_inds) ∈ (1,2) for k in rhs_inds) + error("Indices on the right-hand side, i.e. $rhs_inds, can only occur once or twice") + end + # Find indices that occurs only once + free_inds = tuple((k for k in rhs_inds if count(k==l for l in rhs_inds) == 1)...) 
+ if Set(ci) != Set(free_inds) + error("The free indices on the lhs ($ci), don't match the free indices on the rhs($(free_inds))") + end + # Check that no double indices are given on the lhs + if !all(count(k==l for l in ci) == 1 for k in ci) + error("Indices on the left-hand side can only appear once, which is not the case: $ci") + end +end + +function check_input_consistency(C_body, A_head, A_body, B_head, B_body) + check_arg_expr_consistency(A_head, A_body) + check_arg_expr_consistency(B_head, B_body) + check_index_consistency(C_body.inds, A_body.inds, B_body.inds) +end + +function get_index_dims(dimA::Int, ai::IndexSymbols, dimB::Int, bi::IndexSymbols) + if dimA != dimB + # Will need dispatch for `dimA::Tuple`, `dimB::Tuple` instead for MixedTensor. + error("This should be fixed for MixedTensor, but for now dims must be equal") + end + return dimA +end + +get_output_type(ci::Tuple{}, ::Int, ::ArgInfo, ::TermInfo, ::ArgInfo, ::TermInfo) = nothing + +function get_output_type(ci::IndexSymbols, dim::Int, Aarg::ArgInfo, Aterm::TermInfo, Barg::ArgInfo, Bterm::TermInfo) + syminds = Set{Tuple{Symbol,Symbol}}() + _sort(inds::IndexSymbols{2}) = inds[1] < inds[2] ? inds : (inds[2], inds[1]) + # Collect all sorted pairs of symmetric index names into a Set + for (arg, term) in ((Aarg, Aterm), (Barg, Bterm)) + arg.type !== :SymmetricTensor && continue + for i in 2:2:length(term.inds) + push!(syminds, _sort(term.inds[(i-1):i])) + end + end + # If each consecutive pair of output indices are in syminds, then the output will by symmetric. + is_sym = iseven(length(ci)) && all(_sort(ci[(i-1):i]) ∈ syminds for i in 2:2:length(ci)) + return is_sym ? :SymmetricTensor : :Tensor +end + +# The following should just return :MixedTensor once supported. 
+get_output_type(ci::IndexSymbols, dims::NamedTuple, Aarg::ArgInfo, Aterm::TermInfo, Barg::ArgInfo, Bterm::TermInfo) = error("MixedTensor not supported") + +function replace_args!(f, args) + for i in 1:length(args) + if args[i] isa Symbol + args[i] = f(args[i]) + elseif args[i] isa Expr + replace_args!(f, args[i].args) + end # Could be e.g. LineNumberNode or a number, string etc. + end +end + +function esc_args!(args; syms=(:A, :B)) + f(s::Symbol) = s ∈ syms ? esc(s) : s + replace_args!(f, args) +end + +function tensor_product!(expr, args...) + # 1) Analyze the header and function body for information + # 2) Generate the function body + # 3) Replace the function body in ´expr` with the generated function body + + # Unpack performance annotations, such as @inbounds and @inline + if expr.head === :macrocall + # Check that type is allowed macros (so we don't have to escape) + @assert expr.args[1] ∈ (Symbol("@inbounds"), Symbol("@inline")) + @assert length(expr.args) == 3 + @assert expr.args[2] isa LineNumberNode + tensor_product!(expr.args[3], args...) + return expr + elseif expr.head === :tuple # get annotations such as muladd + if expr.args[1].head !== :function + error("should be function, but is $(expr.args[1].head)") + end + if length(args) != 0 + error("args given in two locations, a: $args, b: $(expr.args[2:end])") + end + the_args = expr.args[2:end] + expr.head = :function + expr.args = expr.args[1].args + return tensor_product!(expr, the_args...) 
+ end + + use_muladd = :muladd in args + + if expr.head !== :function + error("Unexpected head = $(expr.head)") + end + @assert length(expr.args) == 2 + + # Header + A_arginfo, B_arginfo = extract_header_arginfos(expr.args[1]) + expr.args[1] = esc(expr.args[1]) # Escape header as this should be returned as evaluted in the macro-caller's scope + + # Body + body = expr.args[2] + C_terminfo, A_terminfo, B_terminfo = extract_body_information(body) + check_input_consistency(C_terminfo, A_arginfo, A_terminfo, B_arginfo, B_terminfo) + dim = get_index_dims(A_arginfo.dim, A_terminfo.inds, B_arginfo.dim, B_terminfo.inds) + + # Have checked in extract_body_information that last args in body is the actual expression to be changed. + # Here, we overwrite the index expression content with the generated expression + the_expr = get_expression(C_terminfo.inds, A_terminfo.inds, B_terminfo.inds, dim; + TC = get_output_type(C_terminfo.inds, dim, A_arginfo, A_terminfo, B_arginfo, B_terminfo), + TA = A_arginfo.type, TB = B_arginfo.type, use_muladd + ) + esc_args!(the_expr.args; syms=(:A, :B)) + body.args[end] = the_expr + return expr +end + +""" + @tensor_product(expr, args...) + +Generate a function to evaluate a tensor product based on an index expression. +```julia +@tensor_product function my_op(A::Tensor{2,3}, B::Tensor{1,3}) + C[i] = A[i,j]*B[j] +end +``` +The type specification of `A` and `B` should contain at least the type of tensor, order, and dim. +Additional type parameters can optionally be given to dispatch on e.g. the `eltype`. +The return type of `C`, i.e. `Tensor` or `SymmetricTensor` is inferred from the index +expression and the input tensors. +""" +macro tensor_product(expr, args...) + tensor_product!(expr, args...) 
+end + +# ====================================================================================== +# `@foreach` +# ====================================================================================== +function getrange(expr) + @assert expr.head === :call + @assert expr.args[1] === :(:) + @assert all(x->isa(x,Number), expr.args[2:end]) + @assert length(expr.args) ∈ (3,4) + if length(expr.args) == 3 # from:to range + return expr.args[2]:expr.args[3] + else #length(expr.args) == 4 # from:step:to range + return expr.args[2]:expr.args[3]:expr.args[4] + end +end + +function getiterable(expr) + if expr.head === :call && expr.args[1] === :(:) + return getrange(expr) + elseif expr.head === :(tuple) + return (a for a in expr.args) + else + error("Don't know what to do with $(expr.head)") + end +end + +function loop_over_cases(loopsym, cases, expr) + exprs = Expr(:tuple) + for loopvar in getiterable(cases) + tmpexpr = deepcopy(expr) + f(s::Symbol) = (s === loopsym ? loopvar : s) + Tensors.replace_args!(f, tmpexpr.args) + push!(exprs.args, esc(tmpexpr)) + end + return exprs +end + +function foreach(expr) + @assert expr.head === :for + loopsym = expr.args[1].args[1] + isa(loopsym, Symbol) || error("Can only loop over one variable") + cases = expr.args[1].args[2] + codeblock = expr.args[2]::Expr + @assert codeblock.head === :block + return loop_over_cases(loopsym, cases, codeblock) +end + +""" + @foreach expr + +Given an expression of the form +```julia +for in + +end +``` +Return one expression for each item in ``, in which all instances of `` +in `` is replaced by the value in ``. `` must be +hard-coded. 
Example +```julia +@foreach for dim in 1:3 + @foreach for TT in (Tensor, SymmetricTensor) + Tensors.@tensor_product(@inline @inbounds function my_dot(A::TT{2,dim}, B::TT{2,dim}) + C[i,j] = A[i,k]*B[k,j] + end) + end +end +``` +""" +macro foreach(expr) + return foreach(expr) +end diff --git a/src/tensor_products.jl b/src/tensor_products.jl index f5c58fc6..e8a2f74d 100644 --- a/src/tensor_products.jl +++ b/src/tensor_products.jl @@ -1,582 +1,389 @@ -# dcontract, dot, tdot, otimes, cross -""" - dcontract(::SecondOrderTensor, ::SecondOrderTensor) - dcontract(::SecondOrderTensor, ::FourthOrderTensor) - dcontract(::FourthOrderTensor, ::SecondOrderTensor) - dcontract(::FourthOrderTensor, ::FourthOrderTensor) - -Compute the double contraction between two tensors. -The symbol `⊡`, written `\\boxdot`, is overloaded for double contraction. -The reason `:` is not used is because it does not have the same precedence as multiplication. - -# Examples -```jldoctest -julia> A = rand(SymmetricTensor{2, 2}); - -julia> B = rand(SymmetricTensor{2, 2}); - -julia> dcontract(A,B) -1.9732018397544984 - -julia> A ⊡ B -1.9732018397544984 -``` -""" -@generated function dcontract(S1::SecondOrderTensor{dim}, S2::SecondOrderTensor{dim}) where {dim} - idxS1(i, j) = compute_index(get_base(S1), i, j) - idxS2(i, j) = compute_index(get_base(S2), i, j) - ex1 = Expr[:(get_data(S1)[$(idxS1(i, j))]) for i in 1:dim, j in 1:dim][:] - ex2 = Expr[:(get_data(S2)[$(idxS2(i, j))]) for i in 1:dim, j in 1:dim][:] - exp = reducer(ex1, ex2) - return quote - $(Expr(:meta, :inline)) - @inbounds return $exp - end -end - -@generated function dcontract(S1::SecondOrderTensor{dim}, S2::FourthOrderTensor{dim}) where {dim} - TensorType = getreturntype(dcontract, get_base(S1), get_base(S2)) - idxS1(i, j) = compute_index(get_base(S1), i, j) - idxS2(i, j, k, l) = compute_index(get_base(S2), i, j, k, l) - exps = Expr(:tuple) - for l in 1:dim, k in 1:dim - ex1 = Expr[:(get_data(S1)[$(idxS1(i, j))]) for i in 1:dim, j in 1:dim][:] 
- ex2 = Expr[:(get_data(S2)[$(idxS2(i, j, k, l))]) for i in 1:dim, j in 1:dim][:] - push!(exps.args, reducer(ex1, ex2, true)) - end - expr = remove_duplicates(TensorType, exps) - quote - $(Expr(:meta, :inline)) - @inbounds return $TensorType($expr) - end -end - -@generated function dcontract(S1::FourthOrderTensor{dim}, S2::SecondOrderTensor{dim}) where {dim} - TensorType = getreturntype(dcontract, get_base(S1), get_base(S2)) - idxS1(i, j, k, l) = compute_index(get_base(S1), i, j, k, l) - idxS2(i, j) = compute_index(get_base(S2), i, j) - exps = Expr(:tuple) - for j in 1:dim, i in 1:dim - ex1 = Expr[:(get_data(S1)[$(idxS1(i, j, k, l))]) for k in 1:dim, l in 1:dim][:] - ex2 = Expr[:(get_data(S2)[$(idxS2(k, l))]) for k in 1:dim, l in 1:dim][:] - push!(exps.args, reducer(ex1, ex2, true)) - end - expr = remove_duplicates(TensorType, exps) - quote - $(Expr(:meta, :inline)) - @inbounds return $TensorType($expr) - end -end - -@generated function dcontract(S1::SecondOrderTensor{dim}, S2::Tensor{3,dim}) where {dim} - TensorType = getreturntype(dcontract, get_base(S1), get_base(S2)) - idxS1(i, j) = compute_index(get_base(S1), i, j) - idxS2(i, j, k) = compute_index(get_base(S2), i, j, k) - exps = Expr(:tuple) - for k in 1:dim - ex1 = Expr[:(get_data(S1)[$(idxS1(i, j))]) for i in 1:dim, j in 1:dim][:] - ex2 = Expr[:(get_data(S2)[$(idxS2(i, j, k))]) for i in 1:dim, j in 1:dim][:] - push!(exps.args, reducer(ex1, ex2, true)) - end - expr = remove_duplicates(TensorType, exps) # TODO: Required? 
- quote - $(Expr(:meta, :inline)) - @inbounds return $TensorType($expr) - end -end - -@generated function dcontract(S1::Tensor{3,dim}, S2::SecondOrderTensor{dim}) where {dim} - TensorType = getreturntype(dcontract, get_base(S1), get_base(S2)) - idxS1(i, j, k) = compute_index(get_base(S1), i, j, k) - idxS2(i, j) = compute_index(get_base(S2), i, j) - exps = Expr(:tuple) - for i in 1:dim - ex1 = Expr[:(get_data(S1)[$(idxS1(i, k, l))]) for k in 1:dim, l in 1:dim][:] - ex2 = Expr[:(get_data(S2)[$(idxS2(k, l))]) for k in 1:dim, l in 1:dim][:] - push!(exps.args, reducer(ex1, ex2, true)) - end - expr = remove_duplicates(TensorType, exps) - quote - $(Expr(:meta, :inline)) - @inbounds return $TensorType($expr) - end -end - -@generated function dcontract(S1::Tensor{3,dim}, S2::FourthOrderTensor{dim}) where {dim} - TensorType = getreturntype(dcontract, get_base(S1), get_base(S2)) - idxS1(i, j, k) = compute_index(get_base(S1), i, j, k) - idxS2(i, j, k, l) = compute_index(get_base(S2), i, j, k, l) - exps = Expr(:tuple) - for k in 1:dim, j in 1:dim, i in 1:dim - ex1 = Expr[:(get_data(S1)[$(idxS1(i, m, n))]) for m in 1:dim, n in 1:dim][:] - ex2 = Expr[:(get_data(S2)[$(idxS2(m, n, j, k))]) for m in 1:dim, n in 1:dim][:] - push!(exps.args, reducer(ex1, ex2, true)) - end - expr = remove_duplicates(TensorType, exps) - quote - $(Expr(:meta, :inline)) - @inbounds return $TensorType($expr) - end -end - -@generated function dcontract(S1::FourthOrderTensor{dim}, S2::Tensor{3,dim}) where {dim} - TensorType = getreturntype(dcontract, get_base(S1), get_base(S2)) - idxS1(i, j, k, l) = compute_index(get_base(S1), i, j, k, l) - idxS2(i, j, k) = compute_index(get_base(S2), i, j, k) - exps = Expr(:tuple) - for k in 1:dim, j in 1:dim, i in 1:dim - ex1 = Expr[:(get_data(S1)[$(idxS1(i, j, m, n))]) for m in 1:dim, n in 1:dim][:] - ex2 = Expr[:(get_data(S2)[$(idxS2(m, n, k))]) for m in 1:dim, n in 1:dim][:] - push!(exps.args, reducer(ex1, ex2, true)) - end - expr = remove_duplicates(TensorType, exps) - 
quote - $(Expr(:meta, :inline)) - @inbounds return $TensorType($expr) - end -end - -@generated function dcontract(S1::FourthOrderTensor{dim}, S2::FourthOrderTensor{dim}) where {dim} - TensorType = getreturntype(dcontract, get_base(S1), get_base(S2)) - idxS1(i, j, k, l) = compute_index(get_base(S1), i, j, k, l) - idxS2(i, j, k, l) = compute_index(get_base(S2), i, j, k, l) - exps = Expr(:tuple) - for l in 1:dim, k in 1:dim, j in 1:dim, i in 1:dim - ex1 = Expr[:(get_data(S1)[$(idxS1(i, j, m, n))]) for m in 1:dim, n in 1:dim][:] - ex2 = Expr[:(get_data(S2)[$(idxS2(m, n, k, l))]) for m in 1:dim, n in 1:dim][:] - push!(exps.args, reducer(ex1, ex2, true)) - end - expr = remove_duplicates(TensorType, exps) - quote - $(Expr(:meta, :inline)) - @inbounds return $TensorType($expr) - end -end - -const ⊡ = dcontract - -""" - otimes(::Vec, ::Vec) - otimes(::SecondOrderTensor, ::SecondOrderTensor) - -Compute the open product between two tensors. -The symbol `⊗`, written `\\otimes`, is overloaded for tensor products. 
- -# Examples -```jldoctest -julia> A = rand(SymmetricTensor{2, 2}); - -julia> B = rand(SymmetricTensor{2, 2}); - -julia> A ⊗ B -2×2×2×2 SymmetricTensor{4, 2, Float64, 9}: -[:, :, 1, 1] = - 0.271839 0.352792 - 0.352792 0.260518 - -[:, :, 2, 1] = - 0.469146 0.608857 - 0.608857 0.449607 - -[:, :, 1, 2] = - 0.469146 0.608857 - 0.608857 0.449607 - -[:, :, 2, 2] = - 0.504668 0.654957 - 0.654957 0.48365 -``` -""" -@inline function otimes(S1::Vec{dim}, S2::Vec{dim}) where {dim} - return Tensor{2, dim}(@inline function(i,j) @inbounds S1[i] * S2[j]; end) -end - -@inline function otimes(S1::Vec{dim}, S2::SecondOrderTensor{dim}) where {dim} - return Tensor{3, dim}(@inline function(i,j,k) @inbounds S1[i] * S2[j,k]; end) -end -@inline function otimes(S1::SecondOrderTensor{dim}, S2::Vec{dim}) where {dim} - return Tensor{3, dim}(@inline function(i,j,k) @inbounds S1[i,j] * S2[k]; end) -end - -@inline function otimes(S1::SecondOrderTensor{dim}, S2::SecondOrderTensor{dim}) where {dim} - TensorType = getreturntype(otimes, get_base(typeof(S1)), get_base(typeof(S2))) - TensorType(@inline function(i,j,k,l) @inbounds S1[i,j] * S2[k,l]; end) -end - -# Defining {3}⊗{1} and {1}⊗{3} = {4} would also be valid... - -@inline otimes(S1::Number, S2::Number) = S1*S2 -@inline otimes(S1::AbstractTensor, S2::Number) = S1*S2 -@inline otimes(S1::Number, S2::AbstractTensor) = S1*S2 - -const ⊗ = otimes - -""" - otimes(::Vec) - -Compute the open product of a vector with itself. -Return a `SymmetricTensor`. - -# Examples -```jldoctest -julia> A = rand(Vec{2}) -2-element Vec{2, Float64}: - 0.5908446386657102 - 0.7667970365022592 - -julia> otimes(A) -2×2 SymmetricTensor{2, 2, Float64, 3}: - 0.349097 0.453058 - 0.453058 0.587978 -``` -""" -@inline function otimes(S::Vec{dim}) where {dim} - return SymmetricTensor{2, dim}(@inline function(i,j) @inbounds S[i] * S[j]; end) -end - -""" - otimesu(::SecondOrderTensor, ::SecondOrderTensor) - -Compute the "upper" open product between two tensors. 
- -# Examples -```jldoctest -julia> A = rand(SymmetricTensor{2, 2}); - -julia> B = rand(SymmetricTensor{2, 2}); - -julia> otimesu(A, B) -2×2×2×2 Tensor{4, 2, Float64, 16}: -[:, :, 1, 1] = - 0.271839 0.469146 - 0.352792 0.608857 - -[:, :, 2, 1] = - 0.352792 0.608857 - 0.260518 0.449607 - -[:, :, 1, 2] = - 0.469146 0.504668 - 0.608857 0.654957 - -[:, :, 2, 2] = - 0.608857 0.654957 - 0.449607 0.48365 -``` -""" -@inline function otimesu(S1::SecondOrderTensor{dim}, S2::SecondOrderTensor{dim}) where {dim} - S1_ = convert(Tensor, S1) # Convert to full tensor if symmetric to make 10x faster... (see Tensors.jl#164) - S2_ = convert(Tensor, S2) - return Tensor{4, dim}((i,j,k,l) -> S1_[i,k] * S2_[j,l]) -end - -""" - otimesl(::SecondOrderTensor, ::SecondOrderTensor) - -Compute the "lower" open product between two tensors. - -# Examples -```jldoctest -julia> A = rand(SymmetricTensor{2, 2}); - -julia> B = rand(SymmetricTensor{2, 2}); - -julia> otimesl(A, B) -2×2×2×2 Tensor{4, 2, Float64, 16}: -[:, :, 1, 1] = - 0.271839 0.469146 - 0.352792 0.608857 - -[:, :, 2, 1] = - 0.469146 0.504668 - 0.608857 0.654957 - -[:, :, 1, 2] = - 0.352792 0.608857 - 0.260518 0.449607 - -[:, :, 2, 2] = - 0.608857 0.654957 - 0.449607 0.48365 -``` -""" -@inline function otimesl(S1::SecondOrderTensor{dim}, S2::SecondOrderTensor{dim}) where {dim} - S1_ = convert(Tensor, S1) # Convert to full tensor if symmetric to make 10x faster... (see Tensors.jl#164) - S2_ = convert(Tensor, S2) - return Tensor{4, dim}((i,j,k,l) -> S1_[i,l] * S2_[j,k]) -end - -""" - dot(::Vec, ::Vec) - dot(::Vec, ::SecondOrderTensor) - dot(::SecondOrderTensor, ::Vec) - dot(::SecondOrderTensor, ::SecondOrderTensor) - -Computes the dot product (single contraction) between two tensors. -The symbol `⋅`, written `\\cdot`, is overloaded for single contraction. 
- -# Examples -```jldoctest -julia> A = rand(Tensor{2, 2}) -2×2 Tensor{2, 2, Float64, 4}: - 0.590845 0.566237 - 0.766797 0.460085 - -julia> B = rand(Tensor{1, 2}) -2-element Vec{2, Float64}: - 0.7940257103317943 - 0.8541465903790502 - -julia> dot(A, B) -2-element Vec{2, Float64}: - 0.9527955925660736 - 1.0018368881367576 - -julia> A ⋅ B -2-element Vec{2, Float64}: - 0.9527955925660736 - 1.0018368881367576 -``` -""" -@generated function LinearAlgebra.dot(S1::Vec{dim}, S2::Vec{dim}) where {dim} - ex1 = Expr[:(get_data(S1)[$i]) for i in 1:dim] - ex2 = Expr[:(get_data(S2)[$i]) for i in 1:dim] - exp = reducer(ex1, ex2) - quote - $(Expr(:meta, :inline)) - @inbounds return $exp - end -end - -@generated function LinearAlgebra.dot(S1::SecondOrderTensor{dim}, S2::Vec{dim}) where {dim} - idxS1(i, j) = compute_index(get_base(S1), i, j) - exps = Expr(:tuple) - for i in 1:dim - ex1 = Expr[:(get_data(S1)[$(idxS1(i, j))]) for j in 1:dim] - ex2 = Expr[:(get_data(S2)[$j]) for j in 1:dim] - push!(exps.args, reducer(ex1, ex2)) - end - quote - $(Expr(:meta, :inline)) - @inbounds return Vec{dim}($exps) - end -end - -@generated function LinearAlgebra.dot(S1::Vec{dim}, S2::SecondOrderTensor{dim}) where {dim} - idxS2(i, j) = compute_index(get_base(S2), i, j) - exps = Expr(:tuple) - for j in 1:dim - ex1 = Expr[:(get_data(S1)[$i]) for i in 1:dim] - ex2 = Expr[:(get_data(S2)[$(idxS2(i, j))]) for i in 1:dim] - push!(exps.args, reducer(ex1, ex2)) - end - quote - $(Expr(:meta, :inline)) - @inbounds return Vec{dim}($exps) - end -end - -@generated function LinearAlgebra.dot(S1::SecondOrderTensor{dim}, S2::SecondOrderTensor{dim}) where {dim} - idxS1(i, j) = compute_index(get_base(S1), i, j) - idxS2(i, j) = compute_index(get_base(S2), i, j) - exps = Expr(:tuple) - for j in 1:dim, i in 1:dim - ex1 = Expr[:(get_data(S1)[$(idxS1(i, k))]) for k in 1:dim] - ex2 = Expr[:(get_data(S2)[$(idxS2(k, j))]) for k in 1:dim] - push!(exps.args, reducer(ex1, ex2)) - end - quote - $(Expr(:meta, :inline)) - @inbounds 
return Tensor{2, dim}($exps) - end -end - -@generated function LinearAlgebra.dot(S1::FourthOrderTensor{dim}, S2::SecondOrderTensor{dim}) where {dim} - idxS1(i, j, k, l) = compute_index(get_base(S1), i, j, k, l) - idxS2(i, j) = compute_index(get_base(S2), i, j) - exps = Expr(:tuple) - for l in 1:dim, k in 1:dim, j in 1:dim, i in 1:dim - ex1 = Expr[:(get_data(S1)[$(idxS1(i, j, k, m))]) for m in 1:dim] - ex2 = Expr[:(get_data(S2)[$(idxS2(m, l))]) for m in 1:dim] - push!(exps.args, reducer(ex1, ex2)) - end - quote - $(Expr(:meta, :inline)) - @inbounds return Tensor{4, dim}($exps) - end -end - -@generated function LinearAlgebra.dot(S1::SecondOrderTensor{dim}, S2::FourthOrderTensor{dim}) where {dim} - idxS1(i, j) = compute_index(get_base(S1), i, j) - idxS2(i, j, k, l) = compute_index(get_base(S2), i, j, k, l) - exps = Expr(:tuple) - for l in 1:dim, k in 1:dim, j in 1:dim, i in 1:dim - ex1 = Expr[:(get_data(S1)[$(idxS1(i, m))]) for m in 1:dim] - ex2 = Expr[:(get_data(S2)[$(idxS2(m, j, k, l))]) for m in 1:dim] - push!(exps.args, reducer(ex1, ex2)) - end - quote - $(Expr(:meta, :inline)) - @inbounds return Tensor{4, dim}($exps) - end -end - -@generated function LinearAlgebra.dot(S1::Tensor{3,dim}, S2::Vec{dim}) where {dim} - idxS1(i, j, k) = compute_index(get_base(S1), i, j, k) - exps = Expr(:tuple) - for j in 1:dim, i in 1:dim - ex1 = Expr[:(get_data(S1)[$(idxS1(i, j, m))]) for m in 1:dim] - ex2 = Expr[:(get_data(S2)[$m]) for m in 1:dim] - push!(exps.args, reducer(ex1, ex2)) - end - quote - $(Expr(:meta, :inline)) - @inbounds return Tensor{2, dim}($exps) - end -end - -@generated function LinearAlgebra.dot(S1::Vec{dim}, S2::Tensor{3,dim}) where {dim} - idxS2(i, j, k) = compute_index(get_base(S2), i, j, k) - exps = Expr(:tuple) - for j in 1:dim, i in 1:dim - ex1 = Expr[:(get_data(S1)[$m]) for m in 1:dim] - ex2 = Expr[:(get_data(S2)[$(idxS2(m, i, j))]) for m in 1:dim] - push!(exps.args, reducer(ex1, ex2)) - end - quote - $(Expr(:meta, :inline)) - @inbounds return Tensor{2, 
dim}($exps) - end -end - -@generated function LinearAlgebra.dot(S1::SecondOrderTensor{dim}, S2::Tensor{3,dim}) where {dim} - idxS1(i, j) = compute_index(get_base(S1), i, j) - idxS2(i, j, k) = compute_index(get_base(S2), i, j, k) - exps = Expr(:tuple) - for k in 1:dim, j in 1:dim, i in 1:dim - ex1 = Expr[:(get_data(S1)[$(idxS1(i, m))]) for m in 1:dim] - ex2 = Expr[:(get_data(S2)[$(idxS2(m, j, k))]) for m in 1:dim] - push!(exps.args, reducer(ex1, ex2)) - end - quote - $(Expr(:meta, :inline)) - @inbounds return Tensor{3, dim}($exps) - end -end - -@generated function LinearAlgebra.dot(S1::Tensor{3,dim}, S2::SecondOrderTensor{dim}) where {dim} - idxS1(i, j, k) = compute_index(get_base(S1), i, j, k) - idxS2(i, j) = compute_index(get_base(S2), i, j) - exps = Expr(:tuple) - for k in 1:dim, j in 1:dim, i in 1:dim - ex1 = Expr[:(get_data(S1)[$(idxS1(i, j, m))]) for m in 1:dim] - ex2 = Expr[:(get_data(S2)[$(idxS2(m, k))]) for m in 1:dim] - push!(exps.args, reducer(ex1, ex2)) - end - quote - $(Expr(:meta, :inline)) - @inbounds return Tensor{3, dim}($exps) - end -end - -""" - dot(::SymmetricTensor{2}) - -Compute the dot product of a symmetric second order tensor with itself. -Return a `SymmetricTensor`. - -See also [`tdot`](@ref) and [`dott`](@ref). - -# Examples -```jldoctest -julia> A = rand(SymmetricTensor{2,3}) -3×3 SymmetricTensor{2, 3, Float64, 6}: - 0.590845 0.766797 0.566237 - 0.766797 0.460085 0.794026 - 0.566237 0.794026 0.854147 - -julia> dot(A) -3×3 SymmetricTensor{2, 3, Float64, 6}: - 1.2577 1.25546 1.42706 - 1.25546 1.43013 1.47772 - 1.42706 1.47772 1.68067 -``` -""" -@inline LinearAlgebra.dot(S::SymmetricTensor{2}) = tdot(S) - -""" - tdot(A::SecondOrderTensor) - -Compute the transpose-dot product of `A` with itself, i.e. `dot(A', A)`. -Return a `SymmetricTensor`. 
- -# Examples -```jldoctest -julia> A = rand(Tensor{2,3}) -3×3 Tensor{2, 3, Float64, 9}: - 0.590845 0.460085 0.200586 - 0.766797 0.794026 0.298614 - 0.566237 0.854147 0.246837 - -julia> tdot(A) -3×3 SymmetricTensor{2, 3, Float64, 6}: - 1.2577 1.36435 0.48726 - 1.36435 1.57172 0.540229 - 0.48726 0.540229 0.190334 -``` -""" -@inline tdot(S::SecondOrderTensor) = unsafe_symmetric(S' ⋅ S) - -""" - dott(A::SecondOrderTensor) - -Compute the dot-transpose product of `A` with itself, i.e. `dot(A, A')`. -Return a `SymmetricTensor`. - -# Examples -```jldoctest -julia> A = rand(Tensor{2,3}) -3×3 Tensor{2, 3, Float64, 9}: - 0.590845 0.460085 0.200586 - 0.766797 0.794026 0.298614 - 0.566237 0.854147 0.246837 - -julia> dott(A) -3×3 SymmetricTensor{2, 3, Float64, 6}: - 0.601011 0.878275 0.777051 - 0.878275 1.30763 1.18611 - 0.777051 1.18611 1.11112 -``` -""" -@inline dott(S::SecondOrderTensor) = unsafe_symmetric(S ⋅ S') - -""" - cross(::Vec, ::Vec) - -Computes the cross product between two `Vec` vectors, returns a `Vec{3}`. For dimensions 1 and 2 the `Vec`'s -are expanded to 3D first. The infix operator `×` (written `\\times`) can also be used. 
- -# Examples -```jldoctest -julia> a = rand(Vec{3}) -3-element Vec{3, Float64}: - 0.5908446386657102 - 0.7667970365022592 - 0.5662374165061859 - -julia> b = rand(Vec{3}) -3-element Vec{3, Float64}: - 0.4600853424625171 - 0.7940257103317943 - 0.8541465903790502 - -julia> a × b -3-element Vec{3, Float64}: - 0.20535000738340053 - -0.24415039787171888 - 0.11635375677388776 -``` -""" -@inline LinearAlgebra.cross(u::Vec{3}, v::Vec{3}) = @inbounds Vec{3}((u[2]*v[3] - u[3]*v[2], u[3]*v[1] - u[1]*v[3], u[1]*v[2] - u[2]*v[1])) -@inline LinearAlgebra.cross(u::Vec{2,T1}, v::Vec{2,T2}) where {T1,T2} = @inbounds Vec{3}((zero(T1)*zero(T2), zero(T1)*zero(T2), u[1]*v[2] - u[2]*v[1])) -@inline LinearAlgebra.cross( ::Vec{1,T1}, ::Vec{1,T2}) where {T1,T2} = @inbounds zero(Vec{3,promote_type(T1,T2)}) +# dcontract, dot, tdot, otimes, crossa:\:TT{ + +@foreach for dim in 1:3 + @foreach for TA in (Tensor, SymmetricTensor) + @foreach for TB in (Tensor, SymmetricTensor) + # dcontract with both tensors of even order + @tensor_product(@inline @inbounds function dcontract(A::TA{2,dim}, B::TB{2,dim}) + C = A[i,j]*B[i,j] + end, muladd) + @tensor_product(@inline @inbounds function dcontract(A::TA{4,dim}, B::TB{2,dim}) + C[i,j] = A[i,j,k,l]*B[k,l] + end, muladd) + @tensor_product(@inline @inbounds function dcontract(A::TA{2,dim}, B::TB{4,dim}) + C[k,l] = A[i,j]*B[i,j,k,l] + end, muladd) + @tensor_product(@inline @inbounds function dcontract(A::TA{4,dim}, B::TB{4,dim}) + C[i,j,k,l] = A[i,j,m,n]*B[m,n,k,l] + end, muladd) + + # otimes between 2nd order tensors + @tensor_product(@inline @inbounds function otimes(A::TA{2,dim}, B::TB{2,dim}) + C[i,j,k,l] = A[i,j]*B[k,l] + end) + + # dot between two tensors with even order + @tensor_product(@inline @inbounds function LinearAlgebra.dot(A::TA{2,dim}, B::TB{2,dim}) + C[i,j] = A[i,k]*B[k,j] + end) + @tensor_product(@inline @inbounds function LinearAlgebra.dot(A::TA{4,dim}, B::TB{2,dim}) + C[i,j,k,l] = A[i,j,k,m]*B[m,l] + end) + @tensor_product(@inline 
@inbounds function LinearAlgebra.dot(A::TA{2,dim}, B::TB{4,dim}) + C[i,j,k,l] = A[i,m]*B[m,j,k,l] + end) + end + + # dcontract with 3rd order tensors + @tensor_product(@inline @inbounds function dcontract(A::TA{2,dim}, B::Tensor{3,dim}) + C[i] = A[k,l]*B[k,l,i] + end, muladd) + @tensor_product(@inline @inbounds function dcontract(A::Tensor{3,dim}, B::TA{2,dim}) + C[i] = A[i,k,l]*B[k,l] + end, muladd) + @tensor_product(@inline @inbounds function dcontract(A::TA{4,dim}, B::Tensor{3,dim}) + C[i,j,m] = A[i,j,k,l]*B[k,l,m] + end, muladd) + @tensor_product(@inline @inbounds function dcontract(A::Tensor{3,dim}, B::TA{4,dim}) + C[i,m,n] = A[i,k,l]*B[k,l,m,n] + end, muladd) + + # otimes where one argument has an odd order, and one has even order + @tensor_product(@inline @inbounds function otimes(A::Tensor{1,dim}, B::TA{2,dim}) + C[i,j,k] = A[i]*B[j,k] + end) + @tensor_product(@inline @inbounds function otimes(A::TA{2,dim}, B::Tensor{1,dim}) + C[i,j,k] = A[i,j]*B[k] + end) + + # dot where one argument has odd order, and one has even order + @tensor_product(@inline @inbounds function LinearAlgebra.dot(A::TA{2,dim}, B::Tensor{1,dim}) + C[i] = A[i,j]*B[j] + end) + @tensor_product(@inline @inbounds function LinearAlgebra.dot(A::Tensor{1,dim}, B::TA{2,dim}) + C[j] = A[i]*B[i,j] + end) + @tensor_product(@inline @inbounds function LinearAlgebra.dot(A::Tensor{3,dim}, B::TA{2,dim}) + C[i,j,k] = A[i,j,m]*B[m,k] + end) + @tensor_product(@inline @inbounds function LinearAlgebra.dot(A::TA{2,dim}, B::Tensor{3,dim}) + C[i,j,k] = A[i,m]*B[m,j,k] + end) + + end + # otimes where both tensors have odd orders + @tensor_product(@inline @inbounds function otimes(A::Tensor{1,dim}, B::Tensor{1,dim}) + C[i,j] = A[i]*B[j] + end) + # Defining {3}⊗{1} and {1}⊗{3} = {4} would also be valid... 
+ + # dot where both tensors have odd orders + @tensor_product(@inline @inbounds function LinearAlgebra.dot(A::Tensor{1,dim}, B::Tensor{1,dim}) + C = A[i]*B[i] + end) + @tensor_product(@inline @inbounds function LinearAlgebra.dot(A::Tensor{3,dim}, B::Tensor{1,dim}) + C[i,j] = A[i,j,k]*B[k] + end) + @tensor_product(@inline @inbounds function LinearAlgebra.dot(A::Tensor{1,dim}, B::Tensor{3,dim}) + C[i,j] = A[k]*B[k,i,j] + end) +end + +""" + dcontract(::SecondOrderTensor, ::SecondOrderTensor) + dcontract(::SecondOrderTensor, ::FourthOrderTensor) + dcontract(::FourthOrderTensor, ::SecondOrderTensor) + dcontract(::FourthOrderTensor, ::FourthOrderTensor) + +Compute the double contraction between two tensors. +The symbol `⊡`, written `\\boxdot`, is overloaded for double contraction. +The reason `:` is not used is because it does not have the same precedence as multiplication. + +# Examples +```jldoctest +julia> A = rand(SymmetricTensor{2, 2}); + +julia> B = rand(SymmetricTensor{2, 2}); + +julia> dcontract(A,B) +1.9732018397544984 + +julia> A ⊡ B +1.9732018397544984 +``` +""" +function dcontract end + +const ⊡ = dcontract + +""" + otimes(::Vec, ::Vec) + otimes(::SecondOrderTensor, ::SecondOrderTensor) + +Compute the open product between two tensors. +The symbol `⊗`, written `\\otimes`, is overloaded for tensor products. 
+ +# Examples +```jldoctest +julia> A = rand(SymmetricTensor{2, 2}); + +julia> B = rand(SymmetricTensor{2, 2}); + +julia> A ⊗ B +2×2×2×2 SymmetricTensor{4, 2, Float64, 9}: +[:, :, 1, 1] = + 0.271839 0.352792 + 0.352792 0.260518 + +[:, :, 2, 1] = + 0.469146 0.608857 + 0.608857 0.449607 + +[:, :, 1, 2] = + 0.469146 0.608857 + 0.608857 0.449607 + +[:, :, 2, 2] = + 0.504668 0.654957 + 0.654957 0.48365 +``` +""" +function otimes end + + +@inline otimes(S1::Number, S2::Number) = S1*S2 +@inline otimes(S1::AbstractTensor, S2::Number) = S1*S2 +@inline otimes(S1::Number, S2::AbstractTensor) = S1*S2 + +const ⊗ = otimes + +""" + otimes(::Vec) + +Compute the open product of a vector with itself. +Return a `SymmetricTensor`. + +# Examples +```jldoctest +julia> A = rand(Vec{2}) +2-element Vec{2, Float64}: + 0.5908446386657102 + 0.7667970365022592 + +julia> otimes(A) +2×2 SymmetricTensor{2, 2, Float64, 3}: + 0.349097 0.453058 + 0.453058 0.587978 +``` +""" +@inline function otimes(S::Vec{dim}) where {dim} + return SymmetricTensor{2, dim}(@inline function(i,j) @inbounds S[i] * S[j]; end) +end + +""" + otimesu(::SecondOrderTensor, ::SecondOrderTensor) + +Compute the "upper" open product between two tensors. + +# Examples +```jldoctest +julia> A = rand(SymmetricTensor{2, 2}); + +julia> B = rand(SymmetricTensor{2, 2}); + +julia> otimesu(A, B) +2×2×2×2 Tensor{4, 2, Float64, 16}: +[:, :, 1, 1] = + 0.271839 0.469146 + 0.352792 0.608857 + +[:, :, 2, 1] = + 0.352792 0.608857 + 0.260518 0.449607 + +[:, :, 1, 2] = + 0.469146 0.504668 + 0.608857 0.654957 + +[:, :, 2, 2] = + 0.608857 0.654957 + 0.449607 0.48365 +``` +""" +@inline function otimesu(S1::SecondOrderTensor{dim}, S2::SecondOrderTensor{dim}) where {dim} + S1_ = convert(Tensor, S1) # Convert to full tensor if symmetric to make 10x faster... 
(see Tensors.jl#164) + S2_ = convert(Tensor, S2) + return Tensor{4, dim}((i,j,k,l) -> S1_[i,k] * S2_[j,l]) +end + +""" + otimesl(::SecondOrderTensor, ::SecondOrderTensor) + +Compute the "lower" open product between two tensors. + +# Examples +```jldoctest +julia> A = rand(SymmetricTensor{2, 2}); + +julia> B = rand(SymmetricTensor{2, 2}); + +julia> otimesl(A, B) +2×2×2×2 Tensor{4, 2, Float64, 16}: +[:, :, 1, 1] = + 0.271839 0.469146 + 0.352792 0.608857 + +[:, :, 2, 1] = + 0.469146 0.504668 + 0.608857 0.654957 + +[:, :, 1, 2] = + 0.352792 0.608857 + 0.260518 0.449607 + +[:, :, 2, 2] = + 0.608857 0.654957 + 0.449607 0.48365 +``` +""" +@inline function otimesl(S1::SecondOrderTensor{dim}, S2::SecondOrderTensor{dim}) where {dim} + S1_ = convert(Tensor, S1) # Convert to full tensor if symmetric to make 10x faster... (see Tensors.jl#164) + S2_ = convert(Tensor, S2) + return Tensor{4, dim}((i,j,k,l) -> S1_[i,l] * S2_[j,k]) +end + +""" + dot(::Vec, ::Vec) + dot(::Vec, ::SecondOrderTensor) + dot(::SecondOrderTensor, ::Vec) + dot(::SecondOrderTensor, ::SecondOrderTensor) + +Computes the dot product (single contraction) between two tensors. +The symbol `⋅`, written `\\cdot`, is overloaded for single contraction. + +# Examples +```jldoctest +julia> A = rand(Tensor{2, 2}) +2×2 Tensor{2, 2, Float64, 4}: + 0.590845 0.566237 + 0.766797 0.460085 + +julia> B = rand(Tensor{1, 2}) +2-element Vec{2, Float64}: + 0.7940257103317943 + 0.8541465903790502 + +julia> dot(A, B) +2-element Vec{2, Float64}: + 0.9527955925660736 + 1.0018368881367576 + +julia> A ⋅ B +2-element Vec{2, Float64}: + 0.9527955925660736 + 1.0018368881367576 +``` +""" +LinearAlgebra.dot(::AbstractTensor, ::AbstractTensor) + +""" + dot(::SymmetricTensor{2}) + +Compute the dot product of a symmetric second order tensor with itself. +Return a `SymmetricTensor`. + +See also [`tdot`](@ref) and [`dott`](@ref). 
+ +# Examples +```jldoctest +julia> A = rand(SymmetricTensor{2,3}) +3×3 SymmetricTensor{2, 3, Float64, 6}: + 0.590845 0.766797 0.566237 + 0.766797 0.460085 0.794026 + 0.566237 0.794026 0.854147 + +julia> dot(A) +3×3 SymmetricTensor{2, 3, Float64, 6}: + 1.2577 1.25546 1.42706 + 1.25546 1.43013 1.47772 + 1.42706 1.47772 1.68067 +``` +""" +@inline LinearAlgebra.dot(S::SymmetricTensor{2}) = tdot(S) + +""" + tdot(A::SecondOrderTensor) + +Compute the transpose-dot product of `A` with itself, i.e. `dot(A', A)`. +Return a `SymmetricTensor`. + +# Examples +```jldoctest +julia> A = rand(Tensor{2,3}) +3×3 Tensor{2, 3, Float64, 9}: + 0.590845 0.460085 0.200586 + 0.766797 0.794026 0.298614 + 0.566237 0.854147 0.246837 + +julia> tdot(A) +3×3 SymmetricTensor{2, 3, Float64, 6}: + 1.2577 1.36435 0.48726 + 1.36435 1.57172 0.540229 + 0.48726 0.540229 0.190334 +``` +""" +@inline tdot(S::SecondOrderTensor) = unsafe_symmetric(S' ⋅ S) + +""" + dott(A::SecondOrderTensor) + +Compute the dot-transpose product of `A` with itself, i.e. `dot(A, A')`. +Return a `SymmetricTensor`. + +# Examples +```jldoctest +julia> A = rand(Tensor{2,3}) +3×3 Tensor{2, 3, Float64, 9}: + 0.590845 0.460085 0.200586 + 0.766797 0.794026 0.298614 + 0.566237 0.854147 0.246837 + +julia> dott(A) +3×3 SymmetricTensor{2, 3, Float64, 6}: + 0.601011 0.878275 0.777051 + 0.878275 1.30763 1.18611 + 0.777051 1.18611 1.11112 +``` +""" +@inline dott(S::SecondOrderTensor) = unsafe_symmetric(S ⋅ S') + +""" + cross(::Vec, ::Vec) + +Computes the cross product between two `Vec` vectors, returns a `Vec{3}`. For dimensions 1 and 2 the `Vec`'s +are expanded to 3D first. The infix operator `×` (written `\\times`) can also be used. 
+ +# Examples +```jldoctest +julia> a = rand(Vec{3}) +3-element Vec{3, Float64}: + 0.5908446386657102 + 0.7667970365022592 + 0.5662374165061859 + +julia> b = rand(Vec{3}) +3-element Vec{3, Float64}: + 0.4600853424625171 + 0.7940257103317943 + 0.8541465903790502 + +julia> a × b +3-element Vec{3, Float64}: + 0.20535000738340053 + -0.24415039787171888 + 0.11635375677388776 +``` +""" +@inline LinearAlgebra.cross(u::Vec{3}, v::Vec{3}) = @inbounds Vec{3}((u[2]*v[3] - u[3]*v[2], u[3]*v[1] - u[1]*v[3], u[1]*v[2] - u[2]*v[1])) +@inline LinearAlgebra.cross(u::Vec{2,T1}, v::Vec{2,T2}) where {T1,T2} = @inbounds Vec{3}((zero(T1)*zero(T2), zero(T1)*zero(T2), u[1]*v[2] - u[2]*v[1])) +@inline LinearAlgebra.cross( ::Vec{1,T1}, ::Vec{1,T2}) where {T1,T2} = @inbounds zero(Vec{3,promote_type(T1,T2)}) diff --git a/src/transpose.jl b/src/transpose.jl index 4e8250ac..308e0aa1 100644 --- a/src/transpose.jl +++ b/src/transpose.jl @@ -1,50 +1,50 @@ -# transpose, majortranspose, minortranspose -""" - transpose(::Vec) - transpose(::SecondOrderTensor) - transpose(::FourthOrderTensor) - -Compute the transpose of a tensor. -For a fourth order tensor, the transpose is the minor transpose. - -# Examples -```jldoctest -julia> A = rand(Tensor{2,2}) -2×2 Tensor{2, 2, Float64, 4}: - 0.590845 0.566237 - 0.766797 0.460085 - -julia> A' -2×2 Tensor{2, 2, Float64, 4}: - 0.590845 0.766797 - 0.566237 0.460085 -``` -""" -@inline function Base.transpose(S::Tensor{2, dim}) where {dim} - Tensor{2, dim}(@inline function(i, j) @inbounds S[j,i]; end) -end - -@inline Base.transpose(S::SymmetricTensor{2}) = S - -""" - minortranspose(::FourthOrderTensor) - -Compute the minor transpose of a fourth order tensor. 
-""" -@inline function minortranspose(S::Tensor{4, dim}) where {dim} - Tensor{4, dim}(@inline function(i, j, k, l) @inbounds S[j,i,l,k]; end) -end - -@inline minortranspose(S::SymmetricTensor{4}) = S -@inline Base.transpose(S::FourthOrderTensor) = minortranspose(S) - -""" - majortranspose(::FourthOrderTensor) - -Compute the major transpose of a fourth order tensor. -""" -@inline function majortranspose(S::FourthOrderTensor{dim}) where {dim} - Tensor{4, dim}(@inline function(i, j, k, l) @inbounds S[k,l,i,j]; end) -end - -@inline Base.adjoint(S::AllTensors) = transpose(S) +# transpose, majortranspose, minortranspose +""" + transpose(::Vec) + transpose(::SecondOrderTensor) + transpose(::FourthOrderTensor) + +Compute the transpose of a tensor. +For a fourth order tensor, the transpose is the minor transpose. + +# Examples +```jldoctest +julia> A = rand(Tensor{2,2}) +2×2 Tensor{2, 2, Float64, 4}: + 0.590845 0.566237 + 0.766797 0.460085 + +julia> A' +2×2 Tensor{2, 2, Float64, 4}: + 0.590845 0.766797 + 0.566237 0.460085 +``` +""" +@inline function Base.transpose(S::Tensor{2, dim}) where {dim} + Tensor{2, dim}(@inline function(i, j) @inbounds S[j,i]; end) +end + +@inline Base.transpose(S::SymmetricTensor{2}) = S + +""" + minortranspose(::FourthOrderTensor) + +Compute the minor transpose of a fourth order tensor. +""" +@inline function minortranspose(S::Tensor{4, dim}) where {dim} + Tensor{4, dim}(@inline function(i, j, k, l) @inbounds S[j,i,l,k]; end) +end + +@inline minortranspose(S::SymmetricTensor{4}) = S +@inline Base.transpose(S::FourthOrderTensor) = minortranspose(S) + +""" + majortranspose(::FourthOrderTensor) + +Compute the major transpose of a fourth order tensor. 
+""" +@inline function majortranspose(S::FourthOrderTensor{dim}) where {dim} + Tensor{4, dim}(@inline function(i, j, k, l) @inbounds S[k,l,i,j]; end) +end + +@inline Base.adjoint(S::AllTensors) = transpose(S) diff --git a/src/utilities.jl b/src/utilities.jl index 550650a0..129b8579 100644 --- a/src/utilities.jl +++ b/src/utilities.jl @@ -1,104 +1,104 @@ -function tensor_create_linear(T::Union{Type{Tensor{order, dim}}, Type{SymmetricTensor{order, dim}}}, f) where {order, dim} - return Expr(:tuple, [f(i) for i=1:n_components(T)]...) -end - -function tensor_create(::Type{Tensor{order, dim}}, f) where {order, dim} - if order == 1 - ex = Expr(:tuple, [f(i) for i=1:dim]...) - elseif order == 2 - ex = Expr(:tuple, [f(i,j) for i=1:dim, j=1:dim]...) - elseif order == 3 - ex = Expr(:tuple, [f(i,j,k) for i=1:dim, j=1:dim, k=1:dim]...) - elseif order == 4 - ex = Expr(:tuple, [f(i,j,k,l) for i=1:dim, j=1:dim, k = 1:dim, l = 1:dim]...) - end - return ex -end - -function tensor_create(::Type{SymmetricTensor{order, dim}}, f) where {order, dim} - ex = Expr(:tuple) - if order == 2 - for j in 1:dim, i in j:dim - push!(ex.args, f(i, j)) - end - elseif order == 4 - for l in 1:dim, k in l:dim, j in 1:dim, i in j:dim - push!(ex.args, f(i, j, k, l)) - end - end - return ex -end - -# reduced two expressions by summing the products -# madd = true uses muladd instructions which is faster -# in some cases, like in double contraction -function reducer(ex1i, ex2i, madd=false) - ex1, ex2 = remove_duplicates(ex1i, ex2i) - N = length(ex1) - expr = :($(ex1[1]) * $(ex2[1])) - - for i in 2:N - expr = madd ? 
:(muladd($(ex1[i]), $(ex2[i]), $expr)) : - :($(expr) + $(ex1[i]) * $(ex2[i])) - end - return expr -end - -function remove_duplicates(ex1in, ex2in) - ex1out, ex2out = Expr[], Expr[] - exout = Expr[] - factors = ones(Int, length(ex1in)) - - for (ex1ine, ex2ine) in zip(ex1in, ex2in) - prod = :($ex1ine * $ex2ine) - i = findfirst(isequal(prod), exout) # check if this product exist in the output - if i == nothing # this product does not exist yet - push!(ex1out, ex1ine) - push!(ex2out, ex2ine) - push!(exout, prod) - else # found a duplicate - factors[i] += 1 - end - end - for i in 1:length(ex1out) - factors[i] != 1 && (ex1out[i] = :($(factors[i]) * $(ex1out[i]))) - end - return ex1out, ex2out -end - -# check symmetry and return -remove_duplicates(::Type{<:Tensor}, ex) = ex # do nothing if return type is a Tensor -function remove_duplicates(::Type{SymmetricTensor{order, dim}}, ex) where {order, dim} - ex.args = ex.args[SYMMETRIC_INDICES[order][dim]] - return ex -end - -# return types -# double contraction -function dcontract end -@pure getreturntype(::typeof(dcontract), ::Type{<:FourthOrderTensor{dim}}, ::Type{<:FourthOrderTensor{dim}}) where {dim} = Tensor{4, dim} -@pure getreturntype(::typeof(dcontract), ::Type{<:SymmetricTensor{4, dim}}, ::Type{<:SymmetricTensor{4, dim}}) where {dim} = SymmetricTensor{4, dim} -@pure getreturntype(::typeof(dcontract), ::Type{<:Tensor{4, dim}}, ::Type{<:SecondOrderTensor{dim}}) where {dim} = Tensor{2, dim} -@pure getreturntype(::typeof(dcontract), ::Type{<:SymmetricTensor{4, dim}}, ::Type{<:SecondOrderTensor{dim}}) where {dim} = SymmetricTensor{2, dim} -@pure getreturntype(::typeof(dcontract), ::Type{<:SecondOrderTensor{dim}}, ::Type{<:Tensor{4, dim}}) where {dim} = Tensor{2, dim} -@pure getreturntype(::typeof(dcontract), ::Type{<:SecondOrderTensor{dim}}, ::Type{<:SymmetricTensor{4, dim}}) where {dim} = SymmetricTensor{2, dim} -@pure getreturntype(::typeof(dcontract), ::Type{<:Tensor{3,dim}}, ::Type{<:SecondOrderTensor{dim}}) where {dim} 
= Vec{dim} -@pure getreturntype(::typeof(dcontract), ::Type{<:SecondOrderTensor{dim}}, ::Type{<:Tensor{3,dim}}) where {dim} = Vec{dim} -@pure getreturntype(::typeof(dcontract), ::Type{<:Tensor{3,dim}}, ::Type{<:FourthOrderTensor{dim}}) where {dim} = Tensor{3,dim} -@pure getreturntype(::typeof(dcontract), ::Type{<:FourthOrderTensor{dim}}, ::Type{<:Tensor{3,dim}}) where {dim} = Tensor{3,dim} - -# otimes -function otimes end -@pure getreturntype(::typeof(otimes), ::Type{<:Tensor{1, dim}}, ::Type{<:Tensor{1, dim}}) where {dim} = Tensor{2, dim} -@pure getreturntype(::typeof(otimes), ::Type{<:SecondOrderTensor{dim}}, ::Type{<:SecondOrderTensor{dim}}) where {dim} = Tensor{4, dim} -@pure getreturntype(::typeof(otimes), ::Type{<:SymmetricTensor{2, dim}}, ::Type{<:SymmetricTensor{2, dim}}) where {dim} = SymmetricTensor{4, dim} - -# unit stripping if necessary -function ustrip(S::SymmetricTensor{order,dim,T}) where {order, dim, T} - ou = oneunit(T) - if typeof(ou / ou) === T # no units - return S - else # units, so strip them by dividing with oneunit(T) - return SymmetricTensor{order,dim}(map(x -> x / ou, S.data)) - end -end +function tensor_create_linear(T::Union{Type{Tensor{order, dim}}, Type{SymmetricTensor{order, dim}}}, f) where {order, dim} + return Expr(:tuple, [f(i) for i=1:n_components(T)]...) +end + +function tensor_create(::Type{Tensor{order, dim}}, f) where {order, dim} + if order == 1 + ex = Expr(:tuple, [f(i) for i=1:dim]...) + elseif order == 2 + ex = Expr(:tuple, [f(i,j) for i=1:dim, j=1:dim]...) + elseif order == 3 + ex = Expr(:tuple, [f(i,j,k) for i=1:dim, j=1:dim, k=1:dim]...) + elseif order == 4 + ex = Expr(:tuple, [f(i,j,k,l) for i=1:dim, j=1:dim, k = 1:dim, l = 1:dim]...) 
+ end + return ex +end + +function tensor_create(::Type{SymmetricTensor{order, dim}}, f) where {order, dim} + ex = Expr(:tuple) + if order == 2 + for j in 1:dim, i in j:dim + push!(ex.args, f(i, j)) + end + elseif order == 4 + for l in 1:dim, k in l:dim, j in 1:dim, i in j:dim + push!(ex.args, f(i, j, k, l)) + end + end + return ex +end + +# reduced two expressions by summing the products +# madd = true uses muladd instructions which is faster +# in some cases, like in double contraction +function reducer(ex1i, ex2i, madd=false) + ex1, ex2 = remove_duplicates(ex1i, ex2i) + N = length(ex1) + expr = :($(ex1[1]) * $(ex2[1])) + + for i in 2:N + expr = madd ? :(muladd($(ex1[i]), $(ex2[i]), $expr)) : + :($(expr) + $(ex1[i]) * $(ex2[i])) + end + return expr +end + +function remove_duplicates(ex1in, ex2in) + ex1out, ex2out = Expr[], Expr[] + exout = Expr[] + factors = ones(Int, length(ex1in)) + + for (ex1ine, ex2ine) in zip(ex1in, ex2in) + prod = :($ex1ine * $ex2ine) + i = findfirst(isequal(prod), exout) # check if this product exist in the output + if i == nothing # this product does not exist yet + push!(ex1out, ex1ine) + push!(ex2out, ex2ine) + push!(exout, prod) + else # found a duplicate + factors[i] += 1 + end + end + for i in 1:length(ex1out) + factors[i] != 1 && (ex1out[i] = :($(factors[i]) * $(ex1out[i]))) + end + return ex1out, ex2out +end + +# check symmetry and return +remove_duplicates(::Type{<:Tensor}, ex) = ex # do nothing if return type is a Tensor +function remove_duplicates(::Type{SymmetricTensor{order, dim}}, ex) where {order, dim} + ex.args = ex.args[SYMMETRIC_INDICES[order][dim]] + return ex +end + +# return types +# double contraction +function dcontract end +@pure getreturntype(::typeof(dcontract), ::Type{<:FourthOrderTensor{dim}}, ::Type{<:FourthOrderTensor{dim}}) where {dim} = Tensor{4, dim} +@pure getreturntype(::typeof(dcontract), ::Type{<:SymmetricTensor{4, dim}}, ::Type{<:SymmetricTensor{4, dim}}) where {dim} = SymmetricTensor{4, dim} +@pure 
getreturntype(::typeof(dcontract), ::Type{<:Tensor{4, dim}}, ::Type{<:SecondOrderTensor{dim}}) where {dim} = Tensor{2, dim} +@pure getreturntype(::typeof(dcontract), ::Type{<:SymmetricTensor{4, dim}}, ::Type{<:SecondOrderTensor{dim}}) where {dim} = SymmetricTensor{2, dim} +@pure getreturntype(::typeof(dcontract), ::Type{<:SecondOrderTensor{dim}}, ::Type{<:Tensor{4, dim}}) where {dim} = Tensor{2, dim} +@pure getreturntype(::typeof(dcontract), ::Type{<:SecondOrderTensor{dim}}, ::Type{<:SymmetricTensor{4, dim}}) where {dim} = SymmetricTensor{2, dim} +@pure getreturntype(::typeof(dcontract), ::Type{<:Tensor{3,dim}}, ::Type{<:SecondOrderTensor{dim}}) where {dim} = Vec{dim} +@pure getreturntype(::typeof(dcontract), ::Type{<:SecondOrderTensor{dim}}, ::Type{<:Tensor{3,dim}}) where {dim} = Vec{dim} +@pure getreturntype(::typeof(dcontract), ::Type{<:Tensor{3,dim}}, ::Type{<:FourthOrderTensor{dim}}) where {dim} = Tensor{3,dim} +@pure getreturntype(::typeof(dcontract), ::Type{<:FourthOrderTensor{dim}}, ::Type{<:Tensor{3,dim}}) where {dim} = Tensor{3,dim} + +# otimes +function otimes end +@pure getreturntype(::typeof(otimes), ::Type{<:Tensor{1, dim}}, ::Type{<:Tensor{1, dim}}) where {dim} = Tensor{2, dim} +@pure getreturntype(::typeof(otimes), ::Type{<:SecondOrderTensor{dim}}, ::Type{<:SecondOrderTensor{dim}}) where {dim} = Tensor{4, dim} +@pure getreturntype(::typeof(otimes), ::Type{<:SymmetricTensor{2, dim}}, ::Type{<:SymmetricTensor{2, dim}}) where {dim} = SymmetricTensor{4, dim} + +# unit stripping if necessary +function ustrip(S::SymmetricTensor{order,dim,T}) where {order, dim, T} + ou = oneunit(T) + if typeof(ou / ou) === T # no units + return S + else # units, so strip them by dividing with oneunit(T) + return SymmetricTensor{order,dim}(map(x -> x / ou, S.data)) + end +end diff --git a/src/voigt.jl b/src/voigt.jl index e771bcc0..424e635f 100644 --- a/src/voigt.jl +++ b/src/voigt.jl @@ -1,367 +1,367 @@ -const DEFAULT_VOIGT_ORDER = ([1], [1 3; 4 2], [1 6 5; 9 2 4; 8 7 3]) 
-""" - tovoigt([type::Type{<:AbstractArray}, ]A::Union{SecondOrderTensor, FourthOrderTensor}; kwargs...) - -Converts a tensor to "Voigt"-format. - -Optional argument: -- `type`: determines the returned Array type. Possible types are `Array` and `SArray` (see - [`StaticArrays`](https://juliaarrays.github.io/StaticArrays.jl/stable/)). - -Keyword arguments: - - `offdiagscale`: determines the scaling factor for the offdiagonal elements. - This argument is only applicable for `SymmetricTensor`s. `tomandel` can also - be used for the "Mandel"-format which sets `offdiagscale = √2` for `SymmetricTensor`s, - and is equivalent to `tovoigt` for `Tensor`s. - - `order`: matrix of the linear indices determining the Voigt order. The default - index order is `[11, 22, 33, 23, 13, 12, 32, 31, 21]`, corresponding to - `order = [1 6 5; 9 2 4; 8 7 3]`. - -See also [`tovoigt!`](@ref) and [`fromvoigt`](@ref). - -```jldoctest -julia> tovoigt(Tensor{2,3}(1:9)) -9-element Vector{Int64}: - 1 - 5 - 9 - 8 - 7 - 4 - 6 - 3 - 2 - -julia> tovoigt(SymmetricTensor{2,3}(1:6); offdiagscale = 2) -6-element Vector{Int64}: - 1 - 4 - 6 - 10 - 6 - 4 - -julia> tovoigt(Tensor{4,2}(1:16)) -4×4 Matrix{Int64}: - 1 13 9 5 - 4 16 12 8 - 3 15 11 7 - 2 14 10 6 - -julia> tovoigt(SMatrix, Tensor{4,2}(1:16)) -4×4 SMatrix{4, 4, Int64, 16} with indices SOneTo(4)×SOneTo(4): - 1 13 9 5 - 4 16 12 8 - 3 15 11 7 - 2 14 10 6 -``` - -""" -function tovoigt end -# default to regular Array -@inline function tovoigt(A::AbstractTensor; kwargs...) - return tovoigt(Array, A; kwargs...) -end -@inline tovoigt(::Type{Array}, A::SecondOrderTensor; kwargs...) = tovoigt(Vector, A; kwargs...) -@inline tovoigt(::Type{Array}, A::FourthOrderTensor; kwargs...) = tovoigt(Matrix, A; kwargs...) 
- -@inline function tovoigt(::Type{<:Vector}, A::Tensor{2, dim, T, M}; order=nothing) where {dim, T, M} - @inbounds _tovoigt!(Vector{T}(undef, M), A, order) -end -@inline function tovoigt(::Type{<:Matrix}, A::Tensor{4, dim, T, M}; order=nothing) where {dim, T, M} - @inbounds _tovoigt!(Matrix{T}(undef, Int(√M), Int(√M)), A, order) -end -@inline function tovoigt(::Type{<:Vector}, A::SymmetricTensor{2, dim, T, M}; offdiagscale=one(T), order=nothing) where {dim, T, M} - @inbounds _tovoigt!(Vector{T}(undef, M), A, order; offdiagscale=offdiagscale) -end -@inline function tovoigt(::Type{<:Matrix}, A::SymmetricTensor{4, dim, T, M}; offdiagscale=one(T), order=nothing) where {dim, T, M} - @inbounds _tovoigt!(Matrix{T}(undef, Int(√M), Int(√M)), A, order; offdiagscale=offdiagscale) -end - -""" - tovoigt!(v::AbstractArray, A::Union{SecondOrderTensor, FourthOrderTensor}; kwargs...) - -Converts a tensor to "Voigt"-format using the following index order: -`[11, 22, 33, 23, 13, 12, 32, 31, 21]`. - -Keyword arguments: - - `offset`: offset index for where in the array `A` the tensor should be stored. - For 4th order tensors the keyword arguments are `offset_i` and `offset_j`, - respectively. Defaults to `0`. - - `offdiagscale`: determines the scaling factor for the offdiagonal elements. - This argument is only applicable for `SymmetricTensor`s. `frommandel!` can also - be used for the "Mandel"-format which sets `offdiagscale = √2` for `SymmetricTensor`s, - and is equivalent to `fromvoigt!` for `Tensor`s. - - `order`: matrix of the linear indices determining the Voigt order. The default - index order is `[11, 22, 33, 23, 13, 12, 32, 31, 21]`. - -See also [`tovoigt`](@ref) and [`fromvoigt`](@ref). 
- -```jldoctest -julia> T = rand(Tensor{2,2}) -2×2 Tensor{2, 2, Float64, 4}: - 0.590845 0.566237 - 0.766797 0.460085 - -julia> x = zeros(4); - -julia> tovoigt!(x, T) -4-element Vector{Float64}: - 0.5908446386657102 - 0.4600853424625171 - 0.5662374165061859 - 0.7667970365022592 - -julia> x = zeros(5); - -julia> tovoigt!(x, T; offset=1) -5-element Vector{Float64}: - 0.0 - 0.5908446386657102 - 0.4600853424625171 - 0.5662374165061859 - 0.7667970365022592 -``` -""" -function tovoigt! end -Base.@propagate_inbounds function tovoigt!(v::AbstractVector, A::Tensor{2}; offset::Int=0, order=nothing) - _tovoigt!(v, A, order; offset=offset) -end -Base.@propagate_inbounds function tovoigt!(v::AbstractMatrix, A::Tensor{4}; offset_i::Int=0, offset_j::Int=0, order=nothing) - _tovoigt!(v, A, order; offset_i=offset_i, offset_j=offset_j) -end -Base.@propagate_inbounds function tovoigt!(v::AbstractVector{T}, A::SymmetricTensor{2}; offdiagscale=one(T), offset::Int=0, order=nothing) where T - _tovoigt!(v, A, order; offdiagscale=offdiagscale, offset=offset) -end -Base.@propagate_inbounds function tovoigt!(v::AbstractMatrix{T}, A::SymmetricTensor{4}; offdiagscale=one(T), offset_i::Int=0, offset_j::Int=0, order=nothing) where T - _tovoigt!(v, A, order; offdiagscale=offdiagscale, offset_i=offset_i, offset_j=offset_j) -end - -# default voigt order (faster than custom voigt order) -Base.@propagate_inbounds function _tovoigt!(v::AbstractVecOrMat{T}, A::SecondOrderTensor{dim}, ::Nothing; offset=0, offdiagscale=one(T)) where {dim,T} - tuple_data, = _to_voigt_tuple(A, offdiagscale) - for i in eachindex(tuple_data) - v[offset+i] = tuple_data[i] - end - return v -end -Base.@propagate_inbounds function _tovoigt!(v::AbstractVecOrMat{T}, A::SymmetricTensor{4,dim}, ::Nothing; offdiagscale=one(T), offset_i=0, offset_j=0) where {dim,T} - tuple_data, N = _to_voigt_tuple(A, offdiagscale) - cartesian = CartesianIndices(((offset_i+1):(offset_i+N), (offset_j+1):(offset_j+N))) - for i in eachindex(tuple_data) - 
v[cartesian[i]] = tuple_data[i] - end - return v -end - -Base.@propagate_inbounds function _tovoigt!(v::AbstractVecOrMat{T}, A::Tensor{4,dim}, ::Nothing; kwargs...) where {dim,T} - return _tovoigt!(v, A, DEFAULT_VOIGT_ORDER[dim]; kwargs...) -end - -# custom voigt order (slower than default voigt order) -Base.@propagate_inbounds function _tovoigt!(v::AbstractVector, A::Tensor{2, dim}, order::AbstractVecOrMat; offset::Int=0) where {dim} - for j in 1:dim, i in 1:dim - v[offset + order[i, j]] = A[i, j] - end - return v -end -Base.@propagate_inbounds function _tovoigt!(v::AbstractMatrix, A::Tensor{4, dim}, order::AbstractVecOrMat; offset_i::Int=0, offset_j::Int=0) where {dim} - for l in 1:dim, k in 1:dim, j in 1:dim, i in 1:dim - v[offset_i + order[i, j], offset_j + order[k, l]] = A[i, j, k, l] - end - return v -end -Base.@propagate_inbounds function _tovoigt!(v::AbstractVector{T}, A::SymmetricTensor{2, dim}, order::AbstractVecOrMat; offdiagscale=one(T), offset::Int=0) where {T, dim} - for j in 1:dim, i in 1:j - v[offset + order[i, j]] = i == j ? A[i, j] : A[i, j] * offdiagscale - end - return v -end -Base.@propagate_inbounds function _tovoigt!(v::AbstractMatrix{T}, A::SymmetricTensor{4, dim}, order::AbstractVecOrMat; offdiagscale=one(T), offset_i::Int=0, offset_j::Int=0) where {T, dim} - for l in 1:dim, k in 1:l, j in 1:dim, i in 1:j - v[offset_i + order[i, j], offset_j + order[k, l]] = - (i == j && k == l) ? A[i, j, k, l] : - (i == j || k == l) ? A[i, j, k, l] * offdiagscale : - A[i, j, k, l] * (offdiagscale * offdiagscale) - end - return v -end - -""" - tomandel(A; kwargs...) - -Convert the tensor `A` to voigt-form using the Mandel convention, see [`tovoigt`](@ref). -""" -@inline tomandel(A::AbstractTensor; kwargs...) = tomandel(Array, A; kwargs...) -@inline tomandel(::Type{AT}, A::SymmetricTensor{o, dim, T}; kwargs...) where{AT,o,dim,T} = tovoigt(AT, A; offdiagscale=√(2one(T)), kwargs...) -@inline tomandel(::Type{AT}, A::Tensor; kwargs...) 
where AT = tovoigt(AT, A; kwargs...) - -""" - tomandel!(v, A; kwargs...) - -Fill the array `v` with the values in `A` on voigt-form using the Mandel convention, see [`tovoigt!`](@ref). -""" -Base.@propagate_inbounds tomandel!(v::AbstractVecOrMat{T}, A::SymmetricTensor; kwargs...) where{T} = tovoigt!(v, A; offdiagscale=√(2one(T)), kwargs...) -Base.@propagate_inbounds tomandel!(v::AbstractVecOrMat, A::Tensor; kwargs...) = tovoigt!(v, A; kwargs...) - -""" - fromvoigt(S::Type{<:AbstractTensor}, A::AbstractArray{T}; kwargs...) - -Converts an array `A` stored in Voigt format to a Tensor of type `S`. - -Keyword arguments: - - `offset`: offset index for where in the array `A` the tensor starts. For 4th order - tensors the keyword arguments are `offset_i` and `offset_j`, respectively. - Defaults to `0`. - - `offdiagscale`: determines the scaling factor for the offdiagonal elements. - This argument is only applicable for `SymmetricTensor`s. `frommandel` can also - be used for the "Mandel"-format which sets `offdiagscale = √2` for `SymmetricTensor`s, - and is equivalent to `fromvoigt` for `Tensor`s. - - `order`: matrix of the linear indices determining the Voigt order. The default - index order is `[11, 22, 33, 23, 13, 12, 32, 31, 21]`, corresponding to - `order = [1 6 5; 9 2 4; 8 7 3]`. - -See also [`tovoigt`](@ref). 
- -```jldoctest -julia> fromvoigt(Tensor{2,3}, 1.0:1.0:9.0) -3×3 Tensor{2, 3, Float64, 9}: - 1.0 6.0 5.0 - 9.0 2.0 4.0 - 8.0 7.0 3.0 -``` -""" -Base.@propagate_inbounds function fromvoigt(TT::Type{<: Tensor{2, dim}}, v::AbstractVector; offset::Int=0, order=DEFAULT_VOIGT_ORDER[dim]) where {dim} - return TT(function (i, j); return v[offset + order[i, j]]; end) -end -Base.@propagate_inbounds function fromvoigt(TT::Type{<: Tensor{4, dim}}, v::AbstractMatrix; offset_i::Int=0, offset_j::Int=0, order=DEFAULT_VOIGT_ORDER[dim]) where {dim} - return TT(function (i, j, k, l); return v[offset_i + order[i, j], offset_j + order[k, l]]; end) -end -Base.@propagate_inbounds function fromvoigt(TT::Type{<: SymmetricTensor{2, dim}}, v::AbstractVector{T}; offdiagscale = one(T), offset::Int=0, order=DEFAULT_VOIGT_ORDER[dim]) where {dim, T} - return TT(function (i, j) - i > j && ((i, j) = (j, i)) - i == j ? (return v[offset + order[i, j]]) : - (return T(v[offset + order[i, j]] / offdiagscale)) - end) -end -Base.@propagate_inbounds function fromvoigt(TT::Type{<: SymmetricTensor{4, dim}}, v::AbstractMatrix{T}; offdiagscale = one(T), offset_i::Int=0, offset_j::Int=0, order=DEFAULT_VOIGT_ORDER[dim]) where {dim, T} - return TT(function (i, j, k, l) - i > j && ((i, j) = (j, i)) - k > l && ((k, l) = (l, k)) - i == j && k == l ? (return v[offset_i + order[i, j], offset_j + order[k, l]]) : - i == j || k == l ? (return T(v[offset_i + order[i, j], offset_j + order[k, l]] / offdiagscale)) : - (return T(v[offset_i + order[i, j], offset_j + order[k, l]] / (offdiagscale * offdiagscale))) - end) -end - -""" - frommandel(TT, v; kwargs...) - -Convert the Array `v` in voigt-format, following the Mandel convention, to a tensor of type `TT`, see [`fromvoigt`](@ref). -""" -Base.@propagate_inbounds frommandel(TT::Type{<: SymmetricTensor}, v::AbstractVecOrMat{T}; kwargs...) where{T} = fromvoigt(TT, v; offdiagscale=√(2one(T)), kwargs...) 
-Base.@propagate_inbounds frommandel(TT::Type{<: Tensor}, v::AbstractVecOrMat; kwargs...) = fromvoigt(TT, v; kwargs...) - -############################################################## -# Reorder tensor data to tuple in default voigt format order # -############################################################## -function __to_voigt_tuple(A::Type{TT}, s=one(T)) where {TT<:SecondOrderTensor{dim,T}} where {dim,T} - # Define internals for generation - idx_fun(i, j) = compute_index(get_base(A), i, j) - maxind(j) = TT<:SymmetricTensor ? j : dim - N = n_components(get_base(A)) - - exps = Expr(:tuple) - append!(exps.args, [nothing for _ in 1:N]) # "Preallocate" to allow indexing directly - - for j in 1:dim, i in 1:maxind(j) - voigt_ind = DEFAULT_VOIGT_ORDER[dim][i,j] - if i==j - exps.args[voigt_ind] = :(get_data(A)[$(idx_fun(i, j))]) - else - exps.args[voigt_ind] = :(s*get_data(A)[$(idx_fun(i, j))]) - end - end - return exps, N -end - -function __to_voigt_tuple(A::Type{TT}, s=one(T)) where {TT<:FourthOrderTensor{dim,T}} where {dim,T} - # Define internals for generation - idx_fun(i, j, k, l) = compute_index(get_base(A), i, j, k, l) # why no change needed above? - maxind(j) = TT<:SymmetricTensor ? 
j : dim - N = Int(sqrt(n_components(get_base(A)))) - voigt_lin_index(vi, vj) = (vj-1)*N + vi - - exps = Expr(:tuple) - append!(exps.args, [nothing for _ in 1:N^2]) # "Preallocate" to allow indexing directly - - for l in 1:dim, k in 1:maxind(l), j in 1:dim, i in 1:maxind(j) - voigt_lin_ind = voigt_lin_index(DEFAULT_VOIGT_ORDER[dim][i,j], DEFAULT_VOIGT_ORDER[dim][k,l]) - if i==j && k==l - exps.args[voigt_lin_ind] = :(get_data(A)[$(idx_fun(i, j, k, l))]) - elseif i!=j && k!=l - exps.args[voigt_lin_ind] = :(s*s*get_data(A)[$(idx_fun(i, j, k, l))]) - else - exps.args[voigt_lin_ind] = :(s*get_data(A)[$(idx_fun(i, j, k, l))]) - end - end - return exps, N -end - -@generated function _to_voigt_tuple(A::AbstractTensor{order, dim, T}, s=one(T)) where {order,dim,T} - exps, N = __to_voigt_tuple(A, s) - quote - $(Expr(:meta, :inline)) - @inbounds return $exps, $N - end -end - -######################## -# StaticArrays support # -######################## -@inline tovoigt(::Type{SArray}, A::SecondOrderTensor; kwargs...) = tovoigt(SVector, A; kwargs...) -@inline tovoigt(::Type{SArray}, A::FourthOrderTensor; kwargs...) = tovoigt(SMatrix, A; kwargs...) 
-@inline function tovoigt(::Type{<:SVector}, A::Tensor{2, dim, T, M}; order=nothing) where {dim, T, M} - @inbounds _to_static_voigt(A, order) -end -@inline function tovoigt(::Type{<:SMatrix}, A::Tensor{4, dim, T, M}; order=nothing) where {dim, T, M} - @inbounds _to_static_voigt(A, order) -end -@inline function tovoigt(::Type{<:SVector}, A::SymmetricTensor{2, dim, T, M}; offdiagscale=one(T), order=nothing) where {dim, T, M} - @inbounds _to_static_voigt(A, order; offdiagscale=offdiagscale) -end -@inline function tovoigt(::Type{<:SMatrix}, A::SymmetricTensor{4, dim, T}; offdiagscale=one(T), order=nothing) where {dim, T} - @inbounds _to_static_voigt(A, order; offdiagscale=offdiagscale) -end - -# default voigt order -Base.@propagate_inbounds function _to_static_voigt(A::TT, ::Nothing; offdiagscale=one(T)) where {TT<:SecondOrderTensor{dim,T}} where {dim,T} - tuple_data, N = _to_voigt_tuple(A, offdiagscale) - return SVector{N, T}(tuple_data) -end -Base.@propagate_inbounds function _to_static_voigt(A::TT, ::Nothing; offdiagscale=one(T)) where {TT<:FourthOrderTensor{dim,T}} where {dim,T} - tuple_data, N = _to_voigt_tuple(A, offdiagscale) - return SMatrix{N, N, T}(tuple_data) -end - -# custom voigt order -@inline function _to_static_voigt(A::Tensor{2, dim, T, M}, order::AbstractVecOrMat) where {dim, T, M} - v = MVector{M, T}(undef) - @inbounds _tovoigt!(v, A, order) - return SVector(v) -end -@inline function _to_static_voigt(A::Tensor{4, dim, T, M}, order::AbstractVecOrMat) where {dim, T, M} - m = MMatrix{Int(√M), Int(√M), T}(undef) - @inbounds _tovoigt!(m, A, order) - return SMatrix(m) -end -@inline function _to_static_voigt(A::SymmetricTensor{2, dim, T, M}, order::AbstractVecOrMat; offdiagscale=one(T)) where {dim, T, M} - v = MVector{M, T}(undef) - @inbounds _tovoigt!(v, A, order; offdiagscale=offdiagscale) - return SVector(v) -end -@inline function _to_static_voigt(A::SymmetricTensor{4, dim, T, M}, order::AbstractVecOrMat; offdiagscale=one(T)) where {dim, T, M} - m = 
MMatrix{Int(√M), Int(√M), T}(undef) - @inbounds _tovoigt!(m, A, order; offdiagscale=offdiagscale) - return SMatrix(m) -end +const DEFAULT_VOIGT_ORDER = ([1], [1 3; 4 2], [1 6 5; 9 2 4; 8 7 3]) +""" + tovoigt([type::Type{<:AbstractArray}, ]A::Union{SecondOrderTensor, FourthOrderTensor}; kwargs...) + +Converts a tensor to "Voigt"-format. + +Optional argument: +- `type`: determines the returned Array type. Possible types are `Array` and `SArray` (see + [`StaticArrays`](https://juliaarrays.github.io/StaticArrays.jl/stable/)). + +Keyword arguments: + - `offdiagscale`: determines the scaling factor for the offdiagonal elements. + This argument is only applicable for `SymmetricTensor`s. `tomandel` can also + be used for the "Mandel"-format which sets `offdiagscale = √2` for `SymmetricTensor`s, + and is equivalent to `tovoigt` for `Tensor`s. + - `order`: matrix of the linear indices determining the Voigt order. The default + index order is `[11, 22, 33, 23, 13, 12, 32, 31, 21]`, corresponding to + `order = [1 6 5; 9 2 4; 8 7 3]`. + +See also [`tovoigt!`](@ref) and [`fromvoigt`](@ref). + +```jldoctest +julia> tovoigt(Tensor{2,3}(1:9)) +9-element Vector{Int64}: + 1 + 5 + 9 + 8 + 7 + 4 + 6 + 3 + 2 + +julia> tovoigt(SymmetricTensor{2,3}(1:6); offdiagscale = 2) +6-element Vector{Int64}: + 1 + 4 + 6 + 10 + 6 + 4 + +julia> tovoigt(Tensor{4,2}(1:16)) +4×4 Matrix{Int64}: + 1 13 9 5 + 4 16 12 8 + 3 15 11 7 + 2 14 10 6 + +julia> tovoigt(SMatrix, Tensor{4,2}(1:16)) +4×4 SMatrix{4, 4, Int64, 16} with indices SOneTo(4)×SOneTo(4): + 1 13 9 5 + 4 16 12 8 + 3 15 11 7 + 2 14 10 6 +``` + +""" +function tovoigt end +# default to regular Array +@inline function tovoigt(A::AbstractTensor; kwargs...) + return tovoigt(Array, A; kwargs...) +end +@inline tovoigt(::Type{Array}, A::SecondOrderTensor; kwargs...) = tovoigt(Vector, A; kwargs...) +@inline tovoigt(::Type{Array}, A::FourthOrderTensor; kwargs...) = tovoigt(Matrix, A; kwargs...) 
+ +@inline function tovoigt(::Type{<:Vector}, A::Tensor{2, dim, T, M}; order=nothing) where {dim, T, M} + @inbounds _tovoigt!(Vector{T}(undef, M), A, order) +end +@inline function tovoigt(::Type{<:Matrix}, A::Tensor{4, dim, T, M}; order=nothing) where {dim, T, M} + @inbounds _tovoigt!(Matrix{T}(undef, Int(√M), Int(√M)), A, order) +end +@inline function tovoigt(::Type{<:Vector}, A::SymmetricTensor{2, dim, T, M}; offdiagscale=one(T), order=nothing) where {dim, T, M} + @inbounds _tovoigt!(Vector{T}(undef, M), A, order; offdiagscale=offdiagscale) +end +@inline function tovoigt(::Type{<:Matrix}, A::SymmetricTensor{4, dim, T, M}; offdiagscale=one(T), order=nothing) where {dim, T, M} + @inbounds _tovoigt!(Matrix{T}(undef, Int(√M), Int(√M)), A, order; offdiagscale=offdiagscale) +end + +""" + tovoigt!(v::AbstractArray, A::Union{SecondOrderTensor, FourthOrderTensor}; kwargs...) + +Converts a tensor to "Voigt"-format using the following index order: +`[11, 22, 33, 23, 13, 12, 32, 31, 21]`. + +Keyword arguments: + - `offset`: offset index for where in the array `A` the tensor should be stored. + For 4th order tensors the keyword arguments are `offset_i` and `offset_j`, + respectively. Defaults to `0`. + - `offdiagscale`: determines the scaling factor for the offdiagonal elements. + This argument is only applicable for `SymmetricTensor`s. `frommandel!` can also + be used for the "Mandel"-format which sets `offdiagscale = √2` for `SymmetricTensor`s, + and is equivalent to `fromvoigt!` for `Tensor`s. + - `order`: matrix of the linear indices determining the Voigt order. The default + index order is `[11, 22, 33, 23, 13, 12, 32, 31, 21]`. + +See also [`tovoigt`](@ref) and [`fromvoigt`](@ref). 
+ +```jldoctest +julia> T = rand(Tensor{2,2}) +2×2 Tensor{2, 2, Float64, 4}: + 0.590845 0.566237 + 0.766797 0.460085 + +julia> x = zeros(4); + +julia> tovoigt!(x, T) +4-element Vector{Float64}: + 0.5908446386657102 + 0.4600853424625171 + 0.5662374165061859 + 0.7667970365022592 + +julia> x = zeros(5); + +julia> tovoigt!(x, T; offset=1) +5-element Vector{Float64}: + 0.0 + 0.5908446386657102 + 0.4600853424625171 + 0.5662374165061859 + 0.7667970365022592 +``` +""" +function tovoigt! end +Base.@propagate_inbounds function tovoigt!(v::AbstractVector, A::Tensor{2}; offset::Int=0, order=nothing) + _tovoigt!(v, A, order; offset=offset) +end +Base.@propagate_inbounds function tovoigt!(v::AbstractMatrix, A::Tensor{4}; offset_i::Int=0, offset_j::Int=0, order=nothing) + _tovoigt!(v, A, order; offset_i=offset_i, offset_j=offset_j) +end +Base.@propagate_inbounds function tovoigt!(v::AbstractVector{T}, A::SymmetricTensor{2}; offdiagscale=one(T), offset::Int=0, order=nothing) where T + _tovoigt!(v, A, order; offdiagscale=offdiagscale, offset=offset) +end +Base.@propagate_inbounds function tovoigt!(v::AbstractMatrix{T}, A::SymmetricTensor{4}; offdiagscale=one(T), offset_i::Int=0, offset_j::Int=0, order=nothing) where T + _tovoigt!(v, A, order; offdiagscale=offdiagscale, offset_i=offset_i, offset_j=offset_j) +end + +# default voigt order (faster than custom voigt order) +Base.@propagate_inbounds function _tovoigt!(v::AbstractVecOrMat{T}, A::SecondOrderTensor{dim}, ::Nothing; offset=0, offdiagscale=one(T)) where {dim,T} + tuple_data, = _to_voigt_tuple(A, offdiagscale) + for i in eachindex(tuple_data) + v[offset+i] = tuple_data[i] + end + return v +end +Base.@propagate_inbounds function _tovoigt!(v::AbstractVecOrMat{T}, A::SymmetricTensor{4,dim}, ::Nothing; offdiagscale=one(T), offset_i=0, offset_j=0) where {dim,T} + tuple_data, N = _to_voigt_tuple(A, offdiagscale) + cartesian = CartesianIndices(((offset_i+1):(offset_i+N), (offset_j+1):(offset_j+N))) + for i in eachindex(tuple_data) + 
v[cartesian[i]] = tuple_data[i] + end + return v +end + +Base.@propagate_inbounds function _tovoigt!(v::AbstractVecOrMat{T}, A::Tensor{4,dim}, ::Nothing; kwargs...) where {dim,T} + return _tovoigt!(v, A, DEFAULT_VOIGT_ORDER[dim]; kwargs...) +end + +# custom voigt order (slower than default voigt order) +Base.@propagate_inbounds function _tovoigt!(v::AbstractVector, A::Tensor{2, dim}, order::AbstractVecOrMat; offset::Int=0) where {dim} + for j in 1:dim, i in 1:dim + v[offset + order[i, j]] = A[i, j] + end + return v +end +Base.@propagate_inbounds function _tovoigt!(v::AbstractMatrix, A::Tensor{4, dim}, order::AbstractVecOrMat; offset_i::Int=0, offset_j::Int=0) where {dim} + for l in 1:dim, k in 1:dim, j in 1:dim, i in 1:dim + v[offset_i + order[i, j], offset_j + order[k, l]] = A[i, j, k, l] + end + return v +end +Base.@propagate_inbounds function _tovoigt!(v::AbstractVector{T}, A::SymmetricTensor{2, dim}, order::AbstractVecOrMat; offdiagscale=one(T), offset::Int=0) where {T, dim} + for j in 1:dim, i in 1:j + v[offset + order[i, j]] = i == j ? A[i, j] : A[i, j] * offdiagscale + end + return v +end +Base.@propagate_inbounds function _tovoigt!(v::AbstractMatrix{T}, A::SymmetricTensor{4, dim}, order::AbstractVecOrMat; offdiagscale=one(T), offset_i::Int=0, offset_j::Int=0) where {T, dim} + for l in 1:dim, k in 1:l, j in 1:dim, i in 1:j + v[offset_i + order[i, j], offset_j + order[k, l]] = + (i == j && k == l) ? A[i, j, k, l] : + (i == j || k == l) ? A[i, j, k, l] * offdiagscale : + A[i, j, k, l] * (offdiagscale * offdiagscale) + end + return v +end + +""" + tomandel(A; kwargs...) + +Convert the tensor `A` to voigt-form using the Mandel convention, see [`tovoigt`](@ref). +""" +@inline tomandel(A::AbstractTensor; kwargs...) = tomandel(Array, A; kwargs...) +@inline tomandel(::Type{AT}, A::SymmetricTensor{o, dim, T}; kwargs...) where{AT,o,dim,T} = tovoigt(AT, A; offdiagscale=√(2one(T)), kwargs...) +@inline tomandel(::Type{AT}, A::Tensor; kwargs...) 
where AT = tovoigt(AT, A; kwargs...) + +""" + tomandel!(v, A; kwargs...) + +Fill the array `v` with the values in `A` on voigt-form using the Mandel convention, see [`tovoigt!`](@ref). +""" +Base.@propagate_inbounds tomandel!(v::AbstractVecOrMat{T}, A::SymmetricTensor; kwargs...) where{T} = tovoigt!(v, A; offdiagscale=√(2one(T)), kwargs...) +Base.@propagate_inbounds tomandel!(v::AbstractVecOrMat, A::Tensor; kwargs...) = tovoigt!(v, A; kwargs...) + +""" + fromvoigt(S::Type{<:AbstractTensor}, A::AbstractArray{T}; kwargs...) + +Converts an array `A` stored in Voigt format to a Tensor of type `S`. + +Keyword arguments: + - `offset`: offset index for where in the array `A` the tensor starts. For 4th order + tensors the keyword arguments are `offset_i` and `offset_j`, respectively. + Defaults to `0`. + - `offdiagscale`: determines the scaling factor for the offdiagonal elements. + This argument is only applicable for `SymmetricTensor`s. `frommandel` can also + be used for the "Mandel"-format which sets `offdiagscale = √2` for `SymmetricTensor`s, + and is equivalent to `fromvoigt` for `Tensor`s. + - `order`: matrix of the linear indices determining the Voigt order. The default + index order is `[11, 22, 33, 23, 13, 12, 32, 31, 21]`, corresponding to + `order = [1 6 5; 9 2 4; 8 7 3]`. + +See also [`tovoigt`](@ref). 
+ +```jldoctest +julia> fromvoigt(Tensor{2,3}, 1.0:1.0:9.0) +3×3 Tensor{2, 3, Float64, 9}: + 1.0 6.0 5.0 + 9.0 2.0 4.0 + 8.0 7.0 3.0 +``` +""" +Base.@propagate_inbounds function fromvoigt(TT::Type{<: Tensor{2, dim}}, v::AbstractVector; offset::Int=0, order=DEFAULT_VOIGT_ORDER[dim]) where {dim} + return TT(function (i, j); return v[offset + order[i, j]]; end) +end +Base.@propagate_inbounds function fromvoigt(TT::Type{<: Tensor{4, dim}}, v::AbstractMatrix; offset_i::Int=0, offset_j::Int=0, order=DEFAULT_VOIGT_ORDER[dim]) where {dim} + return TT(function (i, j, k, l); return v[offset_i + order[i, j], offset_j + order[k, l]]; end) +end +Base.@propagate_inbounds function fromvoigt(TT::Type{<: SymmetricTensor{2, dim}}, v::AbstractVector{T}; offdiagscale = one(T), offset::Int=0, order=DEFAULT_VOIGT_ORDER[dim]) where {dim, T} + return TT(function (i, j) + i > j && ((i, j) = (j, i)) + i == j ? (return v[offset + order[i, j]]) : + (return T(v[offset + order[i, j]] / offdiagscale)) + end) +end +Base.@propagate_inbounds function fromvoigt(TT::Type{<: SymmetricTensor{4, dim}}, v::AbstractMatrix{T}; offdiagscale = one(T), offset_i::Int=0, offset_j::Int=0, order=DEFAULT_VOIGT_ORDER[dim]) where {dim, T} + return TT(function (i, j, k, l) + i > j && ((i, j) = (j, i)) + k > l && ((k, l) = (l, k)) + i == j && k == l ? (return v[offset_i + order[i, j], offset_j + order[k, l]]) : + i == j || k == l ? (return T(v[offset_i + order[i, j], offset_j + order[k, l]] / offdiagscale)) : + (return T(v[offset_i + order[i, j], offset_j + order[k, l]] / (offdiagscale * offdiagscale))) + end) +end + +""" + frommandel(TT, v; kwargs...) + +Convert the Array `v` in voigt-format, following the Mandel convention, to a tensor of type `TT`, see [`fromvoigt`](@ref). +""" +Base.@propagate_inbounds frommandel(TT::Type{<: SymmetricTensor}, v::AbstractVecOrMat{T}; kwargs...) where{T} = fromvoigt(TT, v; offdiagscale=√(2one(T)), kwargs...) 
+Base.@propagate_inbounds frommandel(TT::Type{<: Tensor}, v::AbstractVecOrMat; kwargs...) = fromvoigt(TT, v; kwargs...) + +############################################################## +# Reorder tensor data to tuple in default voigt format order # +############################################################## +function __to_voigt_tuple(A::Type{TT}, s=one(T)) where {TT<:SecondOrderTensor{dim,T}} where {dim,T} + # Define internals for generation + idx_fun(i, j) = compute_index(get_base(A), i, j) + maxind(j) = TT<:SymmetricTensor ? j : dim + N = n_components(get_base(A)) + + exps = Expr(:tuple) + append!(exps.args, [nothing for _ in 1:N]) # "Preallocate" to allow indexing directly + + for j in 1:dim, i in 1:maxind(j) + voigt_ind = DEFAULT_VOIGT_ORDER[dim][i,j] + if i==j + exps.args[voigt_ind] = :(get_data(A)[$(idx_fun(i, j))]) + else + exps.args[voigt_ind] = :(s*get_data(A)[$(idx_fun(i, j))]) + end + end + return exps, N +end + +function __to_voigt_tuple(A::Type{TT}, s=one(T)) where {TT<:FourthOrderTensor{dim,T}} where {dim,T} + # Define internals for generation + idx_fun(i, j, k, l) = compute_index(get_base(A), i, j, k, l) # why no change needed above? + maxind(j) = TT<:SymmetricTensor ? 
j : dim + N = Int(sqrt(n_components(get_base(A)))) + voigt_lin_index(vi, vj) = (vj-1)*N + vi + + exps = Expr(:tuple) + append!(exps.args, [nothing for _ in 1:N^2]) # "Preallocate" to allow indexing directly + + for l in 1:dim, k in 1:maxind(l), j in 1:dim, i in 1:maxind(j) + voigt_lin_ind = voigt_lin_index(DEFAULT_VOIGT_ORDER[dim][i,j], DEFAULT_VOIGT_ORDER[dim][k,l]) + if i==j && k==l + exps.args[voigt_lin_ind] = :(get_data(A)[$(idx_fun(i, j, k, l))]) + elseif i!=j && k!=l + exps.args[voigt_lin_ind] = :(s*s*get_data(A)[$(idx_fun(i, j, k, l))]) + else + exps.args[voigt_lin_ind] = :(s*get_data(A)[$(idx_fun(i, j, k, l))]) + end + end + return exps, N +end + +@generated function _to_voigt_tuple(A::AbstractTensor{order, dim, T}, s=one(T)) where {order,dim,T} + exps, N = __to_voigt_tuple(A, s) + quote + $(Expr(:meta, :inline)) + @inbounds return $exps, $N + end +end + +######################## +# StaticArrays support # +######################## +@inline tovoigt(::Type{SArray}, A::SecondOrderTensor; kwargs...) = tovoigt(SVector, A; kwargs...) +@inline tovoigt(::Type{SArray}, A::FourthOrderTensor; kwargs...) = tovoigt(SMatrix, A; kwargs...) 
+@inline function tovoigt(::Type{<:SVector}, A::Tensor{2, dim, T, M}; order=nothing) where {dim, T, M} + @inbounds _to_static_voigt(A, order) +end +@inline function tovoigt(::Type{<:SMatrix}, A::Tensor{4, dim, T, M}; order=nothing) where {dim, T, M} + @inbounds _to_static_voigt(A, order) +end +@inline function tovoigt(::Type{<:SVector}, A::SymmetricTensor{2, dim, T, M}; offdiagscale=one(T), order=nothing) where {dim, T, M} + @inbounds _to_static_voigt(A, order; offdiagscale=offdiagscale) +end +@inline function tovoigt(::Type{<:SMatrix}, A::SymmetricTensor{4, dim, T}; offdiagscale=one(T), order=nothing) where {dim, T} + @inbounds _to_static_voigt(A, order; offdiagscale=offdiagscale) +end + +# default voigt order +Base.@propagate_inbounds function _to_static_voigt(A::TT, ::Nothing; offdiagscale=one(T)) where {TT<:SecondOrderTensor{dim,T}} where {dim,T} + tuple_data, N = _to_voigt_tuple(A, offdiagscale) + return SVector{N, T}(tuple_data) +end +Base.@propagate_inbounds function _to_static_voigt(A::TT, ::Nothing; offdiagscale=one(T)) where {TT<:FourthOrderTensor{dim,T}} where {dim,T} + tuple_data, N = _to_voigt_tuple(A, offdiagscale) + return SMatrix{N, N, T}(tuple_data) +end + +# custom voigt order +@inline function _to_static_voigt(A::Tensor{2, dim, T, M}, order::AbstractVecOrMat) where {dim, T, M} + v = MVector{M, T}(undef) + @inbounds _tovoigt!(v, A, order) + return SVector(v) +end +@inline function _to_static_voigt(A::Tensor{4, dim, T, M}, order::AbstractVecOrMat) where {dim, T, M} + m = MMatrix{Int(√M), Int(√M), T}(undef) + @inbounds _tovoigt!(m, A, order) + return SMatrix(m) +end +@inline function _to_static_voigt(A::SymmetricTensor{2, dim, T, M}, order::AbstractVecOrMat; offdiagscale=one(T)) where {dim, T, M} + v = MVector{M, T}(undef) + @inbounds _tovoigt!(v, A, order; offdiagscale=offdiagscale) + return SVector(v) +end +@inline function _to_static_voigt(A::SymmetricTensor{4, dim, T, M}, order::AbstractVecOrMat; offdiagscale=one(T)) where {dim, T, M} + m = 
MMatrix{Int(√M), Int(√M), T}(undef) + @inbounds _tovoigt!(m, A, order; offdiagscale=offdiagscale) + return SMatrix(m) +end diff --git a/test/F64.jl b/test/F64.jl index d639bbab..7ddf814b 100644 --- a/test/F64.jl +++ b/test/F64.jl @@ -1,52 +1,52 @@ -# dummy type wrapping a Float64 used in tests -struct F64 <: AbstractFloat - x::Float64 -end -F64(x::F64) = x - -# operations -for op in (:+, :-) - @eval Base.$op(a::F64) = F64($op(a.x)) -end -for op in (:+, :-, :*, :/) - @eval Base.$op(a::F64, b::F64) = F64($op(a.x, b.x)) -end -for op in(:zero, :one) - @eval Base.$op(::Type{F64}) = F64($op(Float64)) -end - -using Random -Random.rand(rng::AbstractRNG, ::Random.SamplerTrivial{Random.CloseOpen01{F64}}) = F64(rand(rng, Float64)) -Random.randn(rng::Random.AbstractRNG, ::Type{F64}) = F64(randn(rng, Float64)) - -Base.sqrt(a::F64) = F64(sqrt(a.x)) - -# comparison -Base.isapprox(a::F64, b::F64) = isapprox(a.x, b.x) -Base.:<(a::F64, b::F64) = a.x < b.x -Base.:<=(a::F64, b::F64) = a.x <= b.x -Base.eps(::Type{F64}) = eps(Float64) - -# promotion -Base.promote_type(::Type{Float32}, ::Type{F64}) = Float64 # for eig -Base.promote_type(::Type{Float64}, ::Type{F64}) = Float64 # for vecnorm -Base.promote_type(::Type{Int64}, ::Type{F64}) = F64 # for eig in StaticArrays -Base.promote_type(::Type{F64}, ::Type{Int64}) = F64 # for eig in StaticArrays -Base.promote(a::F64, b::T) where {T <: Number} = a, F64(b) -Base.promote(a::T, b::F64) where {T <: Number} = F64(a), b -Base.convert(::Type{F64}, a::F64) = a -Base.convert(::Type{Float64}, a::F64) = a.x -Base.convert(::Type{F64}, a::T) where {T <: Number} = F64(a) - -# for testing of eigen -Base.acos(a::F64) = F64(acos(a.x)) -Base.cos(a::F64) = F64(cos(a.x)) -Base.sin(a::F64) = F64(sin(a.x)) -Base.sincos(a::F64) = (F64(sin(a.x)), F64(cos(a.x))) -Base.precision(::Type{F64}) = precision(Float64) -Base.floatmin(::Type{F64}) = floatmin(Float64) - -# Number type which is not <: Real (Tensors#154) -struct NotReal <: Number - x::Float64 -end +# dummy 
type wrapping a Float64 used in tests +struct F64 <: AbstractFloat + x::Float64 +end +F64(x::F64) = x + +# operations +for op in (:+, :-) + @eval Base.$op(a::F64) = F64($op(a.x)) +end +for op in (:+, :-, :*, :/) + @eval Base.$op(a::F64, b::F64) = F64($op(a.x, b.x)) +end +for op in(:zero, :one) + @eval Base.$op(::Type{F64}) = F64($op(Float64)) +end + +using Random +Random.rand(rng::AbstractRNG, ::Random.SamplerTrivial{Random.CloseOpen01{F64}}) = F64(rand(rng, Float64)) +Random.randn(rng::Random.AbstractRNG, ::Type{F64}) = F64(randn(rng, Float64)) + +Base.sqrt(a::F64) = F64(sqrt(a.x)) + +# comparison +Base.isapprox(a::F64, b::F64) = isapprox(a.x, b.x) +Base.:<(a::F64, b::F64) = a.x < b.x +Base.:<=(a::F64, b::F64) = a.x <= b.x +Base.eps(::Type{F64}) = eps(Float64) + +# promotion +Base.promote_type(::Type{Float32}, ::Type{F64}) = Float64 # for eig +Base.promote_type(::Type{Float64}, ::Type{F64}) = Float64 # for vecnorm +Base.promote_type(::Type{Int64}, ::Type{F64}) = F64 # for eig in StaticArrays +Base.promote_type(::Type{F64}, ::Type{Int64}) = F64 # for eig in StaticArrays +Base.promote(a::F64, b::T) where {T <: Number} = a, F64(b) +Base.promote(a::T, b::F64) where {T <: Number} = F64(a), b +Base.convert(::Type{F64}, a::F64) = a +Base.convert(::Type{Float64}, a::F64) = a.x +Base.convert(::Type{F64}, a::T) where {T <: Number} = F64(a) + +# for testing of eigen +Base.acos(a::F64) = F64(acos(a.x)) +Base.cos(a::F64) = F64(cos(a.x)) +Base.sin(a::F64) = F64(sin(a.x)) +Base.sincos(a::F64) = (F64(sin(a.x)), F64(cos(a.x))) +Base.precision(::Type{F64}) = precision(Float64) +Base.floatmin(::Type{F64}) = floatmin(Float64) + +# Number type which is not <: Real (Tensors#154) +struct NotReal <: Number + x::Float64 +end diff --git a/test/coverage/Project.toml b/test/coverage/Project.toml index 4fbdc477..d0c5532f 100644 --- a/test/coverage/Project.toml +++ b/test/coverage/Project.toml @@ -1,2 +1,2 @@ -[deps] -Coverage = "a2441757-f6aa-5fb2-8edb-039e3f45d037" +[deps] +Coverage = 
"a2441757-f6aa-5fb2-8edb-039e3f45d037" diff --git a/test/runtests.jl b/test/runtests.jl index 99487afe..30b9eb5a 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -1,27 +1,27 @@ -using Tensors -using Test -using TimerOutputs -using LinearAlgebra -using Random -using Statistics: mean -using StaticArrays - -macro testsection(str, block) - return quote - @timeit "$($(esc(str)))" begin - @testset "$($(esc(str)))" begin - $(esc(block)) - end - end - end -end - -reset_timer!() - -include("F64.jl") -include("test_misc.jl") -include("test_ops.jl") -include("test_ad.jl") - -print_timer() -println() +using Tensors +using Test +using TimerOutputs +using LinearAlgebra +using Random +using Statistics: mean +using StaticArrays + +macro testsection(str, block) + return quote + @timeit "$($(esc(str)))" begin + @testset "$($(esc(str)))" begin + $(esc(block)) + end + end + end +end + +reset_timer!() + +include("F64.jl") +include("test_misc.jl") +include("test_ops.jl") +include("test_ad.jl") + +print_timer() +println() diff --git a/test/test_ad.jl b/test/test_ad.jl index bb696b4c..226ca13c 100644 --- a/test/test_ad.jl +++ b/test/test_ad.jl @@ -1,341 +1,341 @@ -using Tensors: ∇, ∇∇, Δ -import ForwardDiff - -function Ψ(C, μ, Kb) - detC = det(C) - J = sqrt(detC) - Ĉ = detC^(-1/3)*C - return 1/2*(μ * (tr(Ĉ)- 3) + Kb*(J-1)^2) -end - -function S(C, μ, Kb) - I = one(C) - J = sqrt(det(C)) - invC = inv(C) - return μ * det(C)^(-1/3)*(I - 1/3*tr(C)*invC) + Kb*(J-1)*J*invC -end - -const μ = 1e10; -const Kb = 1.66e11; -Ψ(C) = Ψ(C, μ, Kb) -S(C) = S(C, μ, Kb) - -@testsection "AD" begin - for dim in 1:3 - @testsection "dim $dim" begin - F = one(Tensor{2,dim}) + rand(Tensor{2,dim}); - C = tdot(F); - C2 = F' ⋅ F; - - @test 2(@inferred ∇(Ψ, C))::typeof(C) ≈ S(C) - @test 2(@inferred ∇(Ψ, C2))::typeof(C2) ≈ S(C2) - - b = rand(SymmetricTensor{2, dim}) - @test 2 * (@inferred ∇∇(Ψ, C))::SymmetricTensor{4, dim} ⊡ b ≈ ∇(S, C) ⊡ b - @test 2 * (@inferred ∇∇(Ψ, C2))::Tensor{4, dim} ⊡ b ≈ ∇(S, C2) ⊡ b - - 
@test ∇(Ψ, C) ≈ ∇(Ψ, C2) - @test ∇(S, C) ⊡ b ≈ ∇(S, C2) ⊡ b - @test ∇∇(Ψ, C) ⊡ b ≈ ∇∇(Ψ, C2) ⊡ b - - for T in (Float32, Float64) - Random.seed!(1234) # needed for getting "good" tensors for calculating det and friends - A = rand(Tensor{2, dim, T}) - B = rand(Tensor{2, dim, T}) - A_sym = rand(SymmetricTensor{2, dim, T}) - B_sym = rand(SymmetricTensor{2, dim, T}) - v = rand(Vec{dim, T}) - II = one(Tensor{4, dim, T}) - II_sym = one(SymmetricTensor{4, dim, T}) - - # Gradient of scalars - @testsection "scalar grad" begin - @test (@inferred ∇(norm, v))::typeof(v) ≈ ((@inferred ∇(norm, v, :all))[1])::typeof(v) ≈ v / norm(v) - @test (∇(norm, v, :all)[2])::T ≈ norm(v) - @test (@inferred ∇(norm, A))::typeof(A) ≈ ((@inferred ∇(norm, A, :all))[1])::typeof(A) ≈ A / norm(A) - @test (∇(norm, A, :all)[2])::T ≈ norm(A) - @test (@inferred ∇(norm, A_sym))::typeof(A_sym) ≈ ((@inferred ∇(norm, A_sym, :all))[1])::typeof(A_sym) ≈ A_sym / norm(A_sym) - @test (∇(norm, A_sym, :all)[2])::T ≈ norm(A_sym) - - @test ∇(v -> 3*v, v) ≈ ∇(v -> 3*v, v, :all)[1] ≈ diagm(Tensor{2, dim}, 3.0) - @test ∇(v -> 3*v, v, :all)[2] ≈ 3*v - # function does not return dual - @test ∇(A -> T(1), A)::typeof(A) ≈ ∇(A -> T(1), A, :all)[1] ≈ 0*A - @test ∇(A -> T(1), A, :all)[2] == 1 - @test ∇(A -> T(1), A_sym)::typeof(A_sym) ≈ ∇(A -> T(1), A, :all)[1] ≈ 0*A_sym - @test ∇(A -> T(1), A_sym, :all)[2] == 1 - end - - @testsection "2nd tensor grad" begin - # Gradient of second order tensors - # https://en.wikipedia.org/wiki/Tensor_derivative_(continuum_mechanics)#Derivatives_of_the_invariants_of_a_second-order_tensor - I1, DI1 = A -> tr(A), A -> one(A) - I2, DI2 = A -> (tr(A)^2 - tr(A⋅A)) / 2, A -> I1(A) * one(A) - A' - I3, DI3 = A -> det(A), A -> det(A) * inv(A)' - - @test (@inferred ∇(I1, A))::typeof(A) ≈ ((@inferred ∇(I1, A, :all))[1])::typeof(A) ≈ DI1(A) - @test (∇(I1, A, :all)[2])::T ≈ I1(A) - @test (@inferred ∇(I2, A))::typeof(A) ≈ ((@inferred ∇(I2, A, :all))[1])::typeof(A) ≈ DI2(A) - @test (∇(I2, A, :all)[2])::T ≈ 
I2(A) - @test (@inferred ∇(I3, A))::typeof(A) ≈ ((@inferred ∇(I3, A, :all))[1])::typeof(A) ≈ DI3(A) - @test (∇(I3, A, :all)[2])::T ≈ I3(A) - @test (@inferred ∇(I1, A_sym))::typeof(A_sym) ≈ ((@inferred ∇(I1, A_sym, :all))[1])::typeof(A_sym) ≈ DI1(A_sym) - @test (∇(I1, A_sym, :all)[2])::T ≈ I1(A_sym) - @test (@inferred ∇(I2, A_sym))::typeof(A_sym) ≈ ((@inferred ∇(I2, A_sym, :all))[1])::typeof(A_sym) ≈ DI2(A_sym) - @test (∇(I2, A_sym, :all)[2])::T ≈ I2(A_sym) - @test (@inferred ∇(I3, A_sym))::typeof(A_sym) ≈ ((@inferred ∇(I3, A_sym, :all))[1])::typeof(A_sym) ≈ DI3(A_sym) - @test (∇(I3, A_sym, :all)[2])::T ≈ I3(A_sym) - - @test (@inferred ∇(identity, A))::Tensor{4, dim, T} ≈ ((@inferred ∇(identity, A, :all))[1])::Tensor{4, dim, T} ≈ II - @test (∇(identity, A, :all)[2])::typeof(A) ≈ A - @test (@inferred ∇(identity, A_sym))::SymmetricTensor{4, dim, T} ≈ ((@inferred ∇(identity, A_sym, :all))[1])::SymmetricTensor{4, dim, T} ≈ II_sym - @test (∇(identity, A_sym, :all)[2])::typeof(A_sym) ≈ A_sym - @test (@inferred ∇(transpose, A))::Tensor{4, dim, T} ⊡ B ≈ ((@inferred ∇(transpose, A, :all))[1])::Tensor{4, dim, T} ⊡ B ≈ B' - @test (∇(transpose, A, :all)[2])::typeof(A) ≈ A' - @test (@inferred ∇(transpose, A_sym))::SymmetricTensor{4, dim, T} ⊡ B_sym ≈ ((@inferred ∇(transpose, A_sym, :all))[1])::SymmetricTensor{4, dim, T} ⊡ B_sym ≈ B_sym' - @test (∇(transpose, A_sym, :all)[2])::typeof(A_sym) ≈ A_sym' - @test (@inferred ∇(inv, A))::Tensor{4, dim, T} ⊡ B ≈ ((@inferred ∇(inv, A, :all))[1])::Tensor{4, dim, T} ⊡ B ≈ - inv(A) ⋅ B ⋅ inv(A) - @test (∇(inv, A, :all)[2])::typeof(A) ≈ inv(A) - @test (@inferred ∇(inv, A_sym))::SymmetricTensor{4, dim, T} ⊡ B_sym ≈ ((@inferred ∇(inv, A_sym, :all))[1])::SymmetricTensor{4, dim, T} ⊡ B_sym ≈ - inv(A_sym) ⋅ B_sym ⋅ inv(A_sym) - @test (∇(inv, A_sym, :all)[2])::typeof(A_sym) ≈ inv(A_sym) - # function does not return dual - @test ∇(A -> B, A) ≈ ∇(A -> B, A, :all)[1] ≈ zero(Tensor{4, dim, T}) - @test ∇(A -> B, A, :all)[2] ≈ B - @test isa(∇(A -> B, A), 
Tensor{4, dim, T}) - end - - # Hessians of scalars - @testsection "hessian" begin - @test (@inferred ∇∇(norm, A))::Tensor{4, dim, T} ≈ ((@inferred ∇∇(norm, A, :all))[1])::Tensor{4, dim, T} ≈ reshape(ForwardDiff.hessian(x -> sqrt(sum(abs2, x)), A), (dim, dim, dim, dim)) - @test (∇∇(norm, A, :all)[2])::typeof(A) ≈ reshape(ForwardDiff.gradient(x -> sqrt(sum(abs2, x)), A), (dim, dim)) - @test (∇∇(norm, A, :all)[3])::T ≈ norm(A) - @test (@inferred ∇∇(norm, A_sym))::SymmetricTensor{4, dim, T} ≈ ((@inferred ∇∇(norm, A_sym, :all))[1])::SymmetricTensor{4, dim, T} - @test (∇∇(norm, A_sym, :all)[2])::typeof(A_sym) ≈ ∇(norm, A_sym) - @test (∇∇(norm, A_sym, :all)[3])::T ≈ norm(A_sym) - # function does not return dual - @test ∇∇(A -> T(1), A)::Tensor{4, dim, T} ≈ ∇∇(A -> T(1), A, :all)[1] ≈ 0*II - @test ∇∇(A -> T(1), A, :all)[2] ≈ 0*A - @test ∇∇(A -> T(1), A, :all)[3] == T(1) - end - @testsection "3rd order" begin - δ(i,j) = (i==j ? 1 : 0) - # - f1(x::Vec) = x⊗x - df1(x::Vec{d}) where d = Tensor{3,d}((i,j,k)-> δ(i,k)*x[j] + δ(j,k)*x[i]); - for dim in 1:3 - z = rand(Vec{dim}) - @test gradient(f1, z) ≈ df1(z) - end - end - end # loop T - end # testsection - end # loop dim - - @testsection "vector calculus identities" begin - Random.seed!(1234) - φ(x) = norm(x)^4 - ϕ(x) = sum(x) - A(x) = Vec{3}((x[1]*x[2]^3*x[3], x[1]*x[2]*x[3]^3, x[1]^3*x[2]*x[3])) - B(x) = Vec{3}((x[1]*x[1], x[1]*x[2], x[1]*x[3])) - for T in (Float32, Float64) - x = rand(Vec{3, T}) - # gradient - @test gradient(x -> φ(x) + ϕ(x), x) ≈ gradient(φ, x) + gradient(ϕ, x) - @test gradient(x -> φ(x) * ϕ(x), x) ≈ φ(x) * gradient(ϕ, x) + gradient(φ, x) * ϕ(x) - @test gradient(x -> A(x) ⋅ B(x), x) ≈ gradient(B, x)⋅A(x) + gradient(A, x)⋅B(x) + A(x)×curl(B, x) + B(x)×curl(A, x) - # divergence - @test divergence(x -> A(x) + B(x), x) ≈ divergence(A, x) + divergence(B, x) - @test divergence(x -> φ(x) * A(x), x) ≈ φ(x)*divergence(A, x) + gradient(φ, x)⋅A(x) - @test divergence(x -> A(x) × B(x), x) ≈ B(x)⋅curl(A, x) - A(x)⋅curl(B, 
x) - # curl - @test curl(x -> A(x) + B(x), x) ≈ curl(A, x) + curl(B, x) - @test curl(x -> φ(x) * A(x), x) ≈ φ(x)*curl(A, x) + gradient(φ, x)×A(x) - @test curl(x -> A(x) × B(x), x) ≈ A(x)*divergence(B, x) - B(x)*divergence(A, x) + gradient(A, x)⋅B(x) - gradient(B, x)⋅A(x) - # second derivatives - @test divergence(x -> curl(A, x), x) ≈ 0 atol = eps(T) - @test curl(x -> gradient(φ, x), x) ≈ zero(Vec{3}) atol = 10eps(T) - @test divergence(x -> gradient(φ, x), x) ≈ laplace(φ, x) - @test gradient(x -> divergence(A, x), x) ≈ curl(x -> curl(A, x), x) + laplace.(A, x) - @test divergence(x -> ϕ(x)*gradient(φ, x), x) ≈ ϕ(x)*laplace(φ, x) + gradient(ϕ, x)⋅gradient(φ, x) - @test divergence(x -> φ(x)*gradient(ϕ, x) - gradient(φ, x)*ϕ(x), x) ≈ φ(x)*laplace(ϕ, x) - laplace(φ, x)*ϕ(x) - @test laplace(x -> ϕ(x)*φ(x), x) ≈ laplace(ϕ, x)*φ(x) + 2*gradient(ϕ, x)⋅gradient(φ, x) + ϕ(x)*laplace(φ, x) - @test laplace.(x -> φ(x)*A(x), x) ≈ A(x)*laplace(φ, x) + 2*(gradient(A, x)⋅gradient(φ, x)) + φ(x)*laplace.(A, x) - @test laplace(x -> A(x)⋅B(x), x) ≈ A(x)⋅laplace.(B, x) - B(x)⋅laplace.(A, x) + 2*divergence(x -> gradient(A, x)⋅B(x) + B(x)×curl(A, x), x) - # third derivatives - @test laplace.(x -> gradient(φ, x), x) ≈ gradient(x -> divergence(x -> gradient(φ, x), x), x) ≈ gradient(x -> laplace(φ, x), x) - @test laplace(x -> divergence(A, x), x) ≈ divergence(x -> gradient(x->divergence(A, x), x), x) ≈ divergence(x -> laplace.(A, x), x) - @test laplace.(x -> curl(A, x), x) ≈ -curl(x -> curl(x -> curl(A, x), x), x) ≈ curl(x -> laplace.(A, x), x) - end # loop T - end # testsection - - @testsection "f: scalar -> scalar" begin - f(x) = 2 * x^3 - x = 3.0 - @test gradient(f, x) ≈ gradient(f, x, :all)[1] ≈ 6 * x^2 - @test gradient(f, x, :all)[2] ≈ f(x) ≈ 2 * x^3 - @test hessian(f, x) ≈ hessian(f, x, :all)[1] ≈ 12 * x - @test hessian(f, x, :all)[2] ≈ gradient(f, x) ≈ 6 * x^2 - @test hessian(f, x, :all)[3] ≈ gradient(f, x, :all)[2] ≈ 2 * x^3 - end - @testsection "f: scalar -> AbstractTensor" begin - 
for dim in 1:3, S in (rand(Vec{dim}), rand(Tensor{2,dim}), rand(SymmetricTensor{2,dim})) - f(x) = x^3 * S - x = 3.0 - @test gradient(f, x) ≈ gradient(f, x, :all)[1] ≈ 3 * x^2 * S - @test gradient(f, x, :all)[2] ≈ f(x) ≈ x^3 * S - @test hessian(f, x) ≈ hessian(f, x, :all)[1] ≈ 6 * x * S - @test hessian(f, x, :all)[2] ≈ gradient(f, x) ≈ 3 * x^2 * S - @test hessian(f, x, :all)[3] ≈ f(x) ≈ x^3 * S - end - end - @testsection "mixed scalar/vec" begin - f(v::Vec, s::Number) = s * v[1] * v[2] - v = Vec((2.0, 3.0)) - s = 4.0 - @test gradient(x -> f(v, x), s)::Float64 ≈ v[1] * v[2] - @test gradient(x -> f(x, s), v)::Vec{2} ≈ Vec((s * v[2], s * v[1])) - @test gradient(y -> gradient(x -> f(y, x), s), v)::Vec{2} ≈ Vec((v[2], v[1])) - @test gradient(y -> gradient(x -> f(x, y), v), s)::Vec{2} ≈ Vec((v[2], v[1])) - end - - - @testsection "analytical gradient implementation" begin - # Consider the function f(g(x)), we need to test the following variations to cover all cases - # * x::Number - # - g::Number, f:: Number, Vec, Tensor{2}, SymmetricTensor{2} - # - g::Vec, f:: Number, Vec - # - g::Tensor{2}, f:: Number, Tensor{2} - # - g::SymmetricTensor{2}, f:: Number, SymmetricTensor{2} - # * x::Vec - # - g::Number, f:: Number, Vec - # - g::Vec, f:: Number, Vec - # * x::Tensor{2} - # - g::Number, f:: Number, Tensor{2} - # - g::Tensor{2}, f:: Number, Tensor{2} - # * x::SymmetricTensor{2} - # - g::Number, f:: Number, SymmetricTensor{2} - # - g::SymmetricTensor{2}, f:: Number, SymmetricTensor{2} - # - # Define the different types of functions required to test the cases above. Naming convention: - # tm_tn: Function taking in tensor of order n and outputting tensor of order m. 
(Number=0, SymmetricTensor{2}=s) - # _ana: Suffix for the function for which the analytical derivative is implemented for - # Define this function with AbstractFloat input to give error if not properly overloaded for Dual - # as `isa(ForwardDiff.Dual(1, 1), AbstractFloat) = false` - for dim = 1:3 - - a0 = rand(); a1 = rand(Vec{dim}); a2 = rand(Tensor{2,dim}); as = rand(SymmetricTensor{2,dim}) - x0 = rand(); x1 = rand(Vec{dim}); x2 = rand(Tensor{2,dim}); xs = rand(SymmetricTensor{2,dim}) - - # Scalar input (t0) - t0_t0(x) = sin(x); t0_t0_ana(x::AbstractFloat) = t0_t0(x); d_t0_t0(x_) = reverse(gradient(t0_t0, x_, :all)); @implement_gradient t0_t0_ana d_t0_t0 - t1_t0(x) = x*a1; t1_t0_ana(x::AbstractFloat) = t1_t0(x); d_t1_t0(x_) = reverse(gradient(t1_t0, x_, :all)); @implement_gradient t1_t0_ana d_t1_t0 - t2_t0(x) = x*a2; t2_t0_ana(x::AbstractFloat) = t2_t0(x); d_t2_t0(x_) = reverse(gradient(t2_t0, x_, :all)); @implement_gradient t2_t0_ana d_t2_t0 - ts_t0(x) = x*as; ts_t0_ana(x::AbstractFloat) = ts_t0(x); d_ts_t0(x_) = reverse(gradient(ts_t0, x_, :all)); @implement_gradient ts_t0_ana d_ts_t0 - - @test gradient(t0_t0, x0) ≈ gradient(t0_t0_ana, x0) - @test gradient(t1_t0, x0) ≈ gradient(t1_t0_ana, x0) - @test gradient(t2_t0, x0) ≈ gradient(t2_t0_ana, x0) - @test gradient(ts_t0, x0) ≈ gradient(ts_t0_ana, x0) - - # Vector input (t1) - t0_t1(x) = norm(x); t0_t1_ana(x::Vec{<:Any,<:AbstractFloat}) = t0_t1(x); d_t0_t1(x_) = reverse(gradient(t0_t1, x_, :all)); @implement_gradient t0_t1_ana d_t0_t1 - t1_t1(x) = a0*x; t1_t1_ana(x::Vec{<:Any,<:AbstractFloat}) = t1_t1(x); d_t1_t1(x_) = reverse(gradient(t1_t1, x_, :all)); @implement_gradient t1_t1_ana d_t1_t1 - - @test gradient(t0_t1, x1) ≈ gradient(t0_t1_ana, x1) - @test gradient(t1_t1, x1) ≈ gradient(t1_t1_ana, x1) - - # 2nd order tensor input (t2) - t0_t2(x) = norm(x); t0_t2_ana(x::Tensor{2,<:Any,<:AbstractFloat}) = t0_t2(x); d_t0_t2(x_) = reverse(gradient(t0_t2, x_, :all)); @implement_gradient t0_t2_ana d_t0_t2 - t2_t2(x) = 
dot(x,x); t2_t2_ana(x::Tensor{2,<:Any,<:AbstractFloat}) = t2_t2(x); d_t2_t2(x_) = reverse(gradient(t2_t2, x_, :all)); @implement_gradient t2_t2_ana d_t2_t2 - - @test gradient(t0_t2, x2) ≈ gradient(t0_t2_ana, x2) - @test gradient(t2_t2, x2) ≈ gradient(t2_t2_ana, x2) - - # 2nd order symmetric tensor input (ts) - t0_ts(x) = norm(x); t0_ts_ana(x::SymmetricTensor{2,<:Any,<:AbstractFloat}) = t0_ts(x); d_t0_ts(x_) = reverse(gradient(t0_ts, x_, :all)); @implement_gradient t0_ts_ana d_t0_ts - ts_ts(x) = dot(x); ts_ts_ana(x::SymmetricTensor{2,<:Any,<:AbstractFloat}) = ts_ts(x); d_ts_ts(x_) = reverse(gradient(ts_ts, x_, :all)); @implement_gradient ts_ts_ana d_ts_ts - - @test gradient(t0_ts, xs) ≈ gradient(t0_ts_ana, xs) - @test gradient(ts_ts, xs) ≈ gradient(ts_ts_ana, xs) - - # Test combined functions. The important is that another function is called first! - # f(g(x)): naming "f_g_x" - # x scalar, g scalar, f: scalar,Vec,Tensor{2},SymmetricTensor{2}: - t0_t0_t0(x) = t0_t0(t0_t0(x)); t0_t0_t0_ana(x) = t0_t0_ana(t0_t0(x)) - t1_t0_t0(x) = t1_t0(t0_t0(x)); t1_t0_t0_ana(x) = t1_t0_ana(t0_t0(x)) - t2_t0_t0(x) = t2_t0(t0_t0(x)); t2_t0_t0_ana(x) = t2_t0_ana(t0_t0(x)) - ts_t0_t0(x) = ts_t0(t0_t0(x)); ts_t0_t0_ana(x) = ts_t0_ana(t0_t0(x)) - - @test gradient(t0_t0_t0, x0) ≈ gradient(t0_t0_t0_ana, x0) - @test gradient(t1_t0_t0, x0) ≈ gradient(t1_t0_t0_ana, x0) - @test gradient(t2_t0_t0, x0) ≈ gradient(t2_t0_t0_ana, x0) - @test gradient(ts_t0_t0, x0) ≈ gradient(ts_t0_t0_ana, x0) - - # f(g(x)): x scalar, g vector, f: scalar,vector - t0_t1_t0(x) = t0_t1(t1_t0(x)); t0_t1_t0_ana(x) = t0_t1_ana(t1_t0(x)) - t1_t1_t0(x) = t1_t1(t1_t0(x)); t1_t1_t0_ana(x) = t1_t1_ana(t1_t0(x)) - - @test gradient(t0_t1_t0, x0) ≈ gradient(t0_t1_t0_ana, x0) - @test gradient(t1_t1_t0, x0) ≈ gradient(t1_t1_t0_ana, x0) - - # f(g(x)): x scalar, g Tensor{2}, f:scalar, Tensor{2} - t0_t2_t0(x) = t0_t2(t2_t0(x)); t0_t2_t0_ana(x) = t0_t2_ana(t2_t0(x)) - t2_t2_t0(x) = t2_t2(t2_t0(x)); t2_t2_t0_ana(x) = t2_t2_ana(t2_t0(x)); 
- - @test gradient(t0_t2_t0, x0) ≈ gradient(t0_t2_t0_ana, x0) - @test gradient(t2_t2_t0, x0) ≈ gradient(t2_t2_t0_ana, x0) - - # f(g(x)): x scalar, g SymmetricTensor{2}, f:scalar, SymmetricTensor{2} - t0_ts_t0(x) = t0_ts(ts_t0(x)); t0_ts_t0_ana(x) = t0_ts_ana(ts_t0(x)) - ts_ts_t0(x) = ts_ts(ts_t0(x)); ts_ts_t0_ana(x) = ts_ts_ana(ts_t0(x)) - - @test gradient(t0_ts_t0, x0) ≈ gradient(t0_ts_t0_ana, x0) - @test gradient(ts_ts_t0, x0) ≈ gradient(ts_ts_t0_ana, x0) - - # x Vec, g scalar, f: scalar, Vec - t0_t0_t1(x) = t0_t0(t0_t1(x)); t0_t0_t1_ana(x) = t0_t0_ana(t0_t1(x)) - t1_t0_t1(x) = t1_t0(t0_t1(x)); t1_t0_t1_ana(x) = t1_t0_ana(t0_t1(x)) - - @test gradient(t1_t0_t1, x1) ≈ gradient(t1_t0_t1_ana, x1) - @test gradient(t1_t0_t1, x1) ≈ gradient(t1_t0_t1_ana, x1) - - # x Vec, g Vec, f: scalar, Vec - t0_t1_t1(x) = t0_t1(t1_t1(x)); t0_t1_t1_ana(x) = t0_t1_ana(t1_t1(x)) - t1_t1_t1(x) = t1_t1(t1_t1(x)); t1_t1_t1_ana(x) = t1_t1_ana(t1_t1(x)) - - @test gradient(t0_t1_t1, x1) ≈ gradient(t0_t1_t1_ana, x1) - @test gradient(t1_t1_t1, x1) ≈ gradient(t1_t1_t1_ana, x1) - - # x Tensor{2}, g scalar, f: scalar, Tensor{2} - t0_t0_t2(x) = t0_t0(t0_t1(x)); t0_t0_t2_ana(x) = t0_t0_ana(t0_t2(x)) - t2_t0_t2(x) = t2_t0(t0_t2(x)); t2_t0_t2_ana(x) = t2_t0_ana(t0_t2(x)) - - @test gradient(t0_t0_t2, x2) ≈ gradient(t0_t0_t2_ana, x2) - @test gradient(t2_t0_t2, x2) ≈ gradient(t2_t0_t2_ana, x2) - - # x Tensor{2}, g Tensor{2}, f: scalar, Tensor{2} - t0_t2_t2(x) = t0_t2(t2_t2(x)); t0_t2_t2_ana(x) = t0_t2_ana(t2_t2(x)) - t2_t2_t2(x) = t2_t2(t2_t2(x)); t2_t2_t2_ana(x) = t2_t2_ana(t2_t2(x)) - - @test gradient(t0_t2_t2, x2) ≈ gradient(t0_t2_t2_ana, x2) - @test gradient(t2_t2_t2, x2) ≈ gradient(t2_t2_t2_ana, x2) - - # x SymmetricTensor{2}, g scalar, f: scalar, SymmetricTensor{2} - t0_t0_ts(x) = t0_t0(t0_t1(x)); t0_t0_ts_ana(x) = t0_t0_ana(t0_ts(x)) - ts_t0_ts(x) = ts_t0(t0_ts(x)); ts_t0_ts_ana(x) = ts_t0_ana(t0_ts(x)) - - @test gradient(t0_t0_ts, xs) ≈ gradient(t0_t0_ts_ana, xs) - @test gradient(ts_t0_ts, xs) ≈ 
gradient(ts_t0_ts_ana, xs) - - # x SymmetricTensor{2}, g SymmetricTensor{2}, f: scalar, SymmetricTensor{2} - t0_ts_ts(x) = t0_ts(ts_ts(x)); t0_ts_ts_ana(x) = t0_ts_ana(ts_ts(x)) - ts_ts_ts(x) = ts_ts(ts_ts(x)); ts_ts_ts_ana(x) = ts_ts_ana(ts_ts(x)) - - @test gradient(t0_ts_ts, xs) ≈ gradient(t0_ts_ts_ana, xs) - @test gradient(ts_ts_ts, xs) ≈ gradient(ts_ts_ts_ana, xs) - - end - - end - -end # testsection +using Tensors: ∇, ∇∇, Δ +import ForwardDiff + +function Ψ(C, μ, Kb) + detC = det(C) + J = sqrt(detC) + Ĉ = detC^(-1/3)*C + return 1/2*(μ * (tr(Ĉ)- 3) + Kb*(J-1)^2) +end + +function S(C, μ, Kb) + I = one(C) + J = sqrt(det(C)) + invC = inv(C) + return μ * det(C)^(-1/3)*(I - 1/3*tr(C)*invC) + Kb*(J-1)*J*invC +end + +const μ = 1e10; +const Kb = 1.66e11; +Ψ(C) = Ψ(C, μ, Kb) +S(C) = S(C, μ, Kb) + +@testsection "AD" begin + for dim in 1:3 + @testsection "dim $dim" begin + F = one(Tensor{2,dim}) + rand(Tensor{2,dim}); + C = tdot(F); + C2 = F' ⋅ F; + + @test 2(@inferred ∇(Ψ, C))::typeof(C) ≈ S(C) + @test 2(@inferred ∇(Ψ, C2))::typeof(C2) ≈ S(C2) + + b = rand(SymmetricTensor{2, dim}) + @test 2 * (@inferred ∇∇(Ψ, C))::SymmetricTensor{4, dim} ⊡ b ≈ ∇(S, C) ⊡ b + @test 2 * (@inferred ∇∇(Ψ, C2))::Tensor{4, dim} ⊡ b ≈ ∇(S, C2) ⊡ b + + @test ∇(Ψ, C) ≈ ∇(Ψ, C2) + @test ∇(S, C) ⊡ b ≈ ∇(S, C2) ⊡ b + @test ∇∇(Ψ, C) ⊡ b ≈ ∇∇(Ψ, C2) ⊡ b + + for T in (Float32, Float64) + Random.seed!(1234) # needed for getting "good" tensors for calculating det and friends + A = rand(Tensor{2, dim, T}) + B = rand(Tensor{2, dim, T}) + A_sym = rand(SymmetricTensor{2, dim, T}) + B_sym = rand(SymmetricTensor{2, dim, T}) + v = rand(Vec{dim, T}) + II = one(Tensor{4, dim, T}) + II_sym = one(SymmetricTensor{4, dim, T}) + + # Gradient of scalars + @testsection "scalar grad" begin + @test (@inferred ∇(norm, v))::typeof(v) ≈ ((@inferred ∇(norm, v, :all))[1])::typeof(v) ≈ v / norm(v) + @test (∇(norm, v, :all)[2])::T ≈ norm(v) + @test (@inferred ∇(norm, A))::typeof(A) ≈ ((@inferred ∇(norm, A, :all))[1])::typeof(A) ≈ 
A / norm(A) + @test (∇(norm, A, :all)[2])::T ≈ norm(A) + @test (@inferred ∇(norm, A_sym))::typeof(A_sym) ≈ ((@inferred ∇(norm, A_sym, :all))[1])::typeof(A_sym) ≈ A_sym / norm(A_sym) + @test (∇(norm, A_sym, :all)[2])::T ≈ norm(A_sym) + + @test ∇(v -> 3*v, v) ≈ ∇(v -> 3*v, v, :all)[1] ≈ diagm(Tensor{2, dim}, 3.0) + @test ∇(v -> 3*v, v, :all)[2] ≈ 3*v + # function does not return dual + @test ∇(A -> T(1), A)::typeof(A) ≈ ∇(A -> T(1), A, :all)[1] ≈ 0*A + @test ∇(A -> T(1), A, :all)[2] == 1 + @test ∇(A -> T(1), A_sym)::typeof(A_sym) ≈ ∇(A -> T(1), A, :all)[1] ≈ 0*A_sym + @test ∇(A -> T(1), A_sym, :all)[2] == 1 + end + + @testsection "2nd tensor grad" begin + # Gradient of second order tensors + # https://en.wikipedia.org/wiki/Tensor_derivative_(continuum_mechanics)#Derivatives_of_the_invariants_of_a_second-order_tensor + I1, DI1 = A -> tr(A), A -> one(A) + I2, DI2 = A -> (tr(A)^2 - tr(A⋅A)) / 2, A -> I1(A) * one(A) - A' + I3, DI3 = A -> det(A), A -> det(A) * inv(A)' + + @test (@inferred ∇(I1, A))::typeof(A) ≈ ((@inferred ∇(I1, A, :all))[1])::typeof(A) ≈ DI1(A) + @test (∇(I1, A, :all)[2])::T ≈ I1(A) + @test (@inferred ∇(I2, A))::typeof(A) ≈ ((@inferred ∇(I2, A, :all))[1])::typeof(A) ≈ DI2(A) + @test (∇(I2, A, :all)[2])::T ≈ I2(A) + @test (@inferred ∇(I3, A))::typeof(A) ≈ ((@inferred ∇(I3, A, :all))[1])::typeof(A) ≈ DI3(A) + @test (∇(I3, A, :all)[2])::T ≈ I3(A) + @test (@inferred ∇(I1, A_sym))::typeof(A_sym) ≈ ((@inferred ∇(I1, A_sym, :all))[1])::typeof(A_sym) ≈ DI1(A_sym) + @test (∇(I1, A_sym, :all)[2])::T ≈ I1(A_sym) + @test (@inferred ∇(I2, A_sym))::typeof(A_sym) ≈ ((@inferred ∇(I2, A_sym, :all))[1])::typeof(A_sym) ≈ DI2(A_sym) + @test (∇(I2, A_sym, :all)[2])::T ≈ I2(A_sym) + @test (@inferred ∇(I3, A_sym))::typeof(A_sym) ≈ ((@inferred ∇(I3, A_sym, :all))[1])::typeof(A_sym) ≈ DI3(A_sym) + @test (∇(I3, A_sym, :all)[2])::T ≈ I3(A_sym) + + @test (@inferred ∇(identity, A))::Tensor{4, dim, T} ≈ ((@inferred ∇(identity, A, :all))[1])::Tensor{4, dim, T} ≈ II + @test 
(∇(identity, A, :all)[2])::typeof(A) ≈ A + @test (@inferred ∇(identity, A_sym))::SymmetricTensor{4, dim, T} ≈ ((@inferred ∇(identity, A_sym, :all))[1])::SymmetricTensor{4, dim, T} ≈ II_sym + @test (∇(identity, A_sym, :all)[2])::typeof(A_sym) ≈ A_sym + @test (@inferred ∇(transpose, A))::Tensor{4, dim, T} ⊡ B ≈ ((@inferred ∇(transpose, A, :all))[1])::Tensor{4, dim, T} ⊡ B ≈ B' + @test (∇(transpose, A, :all)[2])::typeof(A) ≈ A' + @test (@inferred ∇(transpose, A_sym))::SymmetricTensor{4, dim, T} ⊡ B_sym ≈ ((@inferred ∇(transpose, A_sym, :all))[1])::SymmetricTensor{4, dim, T} ⊡ B_sym ≈ B_sym' + @test (∇(transpose, A_sym, :all)[2])::typeof(A_sym) ≈ A_sym' + @test (@inferred ∇(inv, A))::Tensor{4, dim, T} ⊡ B ≈ ((@inferred ∇(inv, A, :all))[1])::Tensor{4, dim, T} ⊡ B ≈ - inv(A) ⋅ B ⋅ inv(A) + @test (∇(inv, A, :all)[2])::typeof(A) ≈ inv(A) + @test (@inferred ∇(inv, A_sym))::SymmetricTensor{4, dim, T} ⊡ B_sym ≈ ((@inferred ∇(inv, A_sym, :all))[1])::SymmetricTensor{4, dim, T} ⊡ B_sym ≈ - inv(A_sym) ⋅ B_sym ⋅ inv(A_sym) + @test (∇(inv, A_sym, :all)[2])::typeof(A_sym) ≈ inv(A_sym) + # function does not return dual + @test ∇(A -> B, A) ≈ ∇(A -> B, A, :all)[1] ≈ zero(Tensor{4, dim, T}) + @test ∇(A -> B, A, :all)[2] ≈ B + @test isa(∇(A -> B, A), Tensor{4, dim, T}) + end + + # Hessians of scalars + @testsection "hessian" begin + @test (@inferred ∇∇(norm, A))::Tensor{4, dim, T} ≈ ((@inferred ∇∇(norm, A, :all))[1])::Tensor{4, dim, T} ≈ reshape(ForwardDiff.hessian(x -> sqrt(sum(abs2, x)), A), (dim, dim, dim, dim)) + @test (∇∇(norm, A, :all)[2])::typeof(A) ≈ reshape(ForwardDiff.gradient(x -> sqrt(sum(abs2, x)), A), (dim, dim)) + @test (∇∇(norm, A, :all)[3])::T ≈ norm(A) + @test (@inferred ∇∇(norm, A_sym))::SymmetricTensor{4, dim, T} ≈ ((@inferred ∇∇(norm, A_sym, :all))[1])::SymmetricTensor{4, dim, T} + @test (∇∇(norm, A_sym, :all)[2])::typeof(A_sym) ≈ ∇(norm, A_sym) + @test (∇∇(norm, A_sym, :all)[3])::T ≈ norm(A_sym) + # function does not return dual + @test ∇∇(A -> T(1), A)::Tensor{4, 
dim, T} ≈ ∇∇(A -> T(1), A, :all)[1] ≈ 0*II + @test ∇∇(A -> T(1), A, :all)[2] ≈ 0*A + @test ∇∇(A -> T(1), A, :all)[3] == T(1) + end + @testsection "3rd order" begin + δ(i,j) = (i==j ? 1 : 0) + # + f1(x::Vec) = x⊗x + df1(x::Vec{d}) where d = Tensor{3,d}((i,j,k)-> δ(i,k)*x[j] + δ(j,k)*x[i]); + for dim in 1:3 + z = rand(Vec{dim}) + @test gradient(f1, z) ≈ df1(z) + end + end + end # loop T + end # testsection + end # loop dim + + @testsection "vector calculus identities" begin + Random.seed!(1234) + φ(x) = norm(x)^4 + ϕ(x) = sum(x) + A(x) = Vec{3}((x[1]*x[2]^3*x[3], x[1]*x[2]*x[3]^3, x[1]^3*x[2]*x[3])) + B(x) = Vec{3}((x[1]*x[1], x[1]*x[2], x[1]*x[3])) + for T in (Float32, Float64) + x = rand(Vec{3, T}) + # gradient + @test gradient(x -> φ(x) + ϕ(x), x) ≈ gradient(φ, x) + gradient(ϕ, x) + @test gradient(x -> φ(x) * ϕ(x), x) ≈ φ(x) * gradient(ϕ, x) + gradient(φ, x) * ϕ(x) + @test gradient(x -> A(x) ⋅ B(x), x) ≈ gradient(B, x)⋅A(x) + gradient(A, x)⋅B(x) + A(x)×curl(B, x) + B(x)×curl(A, x) + # divergence + @test divergence(x -> A(x) + B(x), x) ≈ divergence(A, x) + divergence(B, x) + @test divergence(x -> φ(x) * A(x), x) ≈ φ(x)*divergence(A, x) + gradient(φ, x)⋅A(x) + @test divergence(x -> A(x) × B(x), x) ≈ B(x)⋅curl(A, x) - A(x)⋅curl(B, x) + # curl + @test curl(x -> A(x) + B(x), x) ≈ curl(A, x) + curl(B, x) + @test curl(x -> φ(x) * A(x), x) ≈ φ(x)*curl(A, x) + gradient(φ, x)×A(x) + @test curl(x -> A(x) × B(x), x) ≈ A(x)*divergence(B, x) - B(x)*divergence(A, x) + gradient(A, x)⋅B(x) - gradient(B, x)⋅A(x) + # second derivatives + @test divergence(x -> curl(A, x), x) ≈ 0 atol = eps(T) + @test curl(x -> gradient(φ, x), x) ≈ zero(Vec{3}) atol = 10eps(T) + @test divergence(x -> gradient(φ, x), x) ≈ laplace(φ, x) + @test gradient(x -> divergence(A, x), x) ≈ curl(x -> curl(A, x), x) + laplace.(A, x) + @test divergence(x -> ϕ(x)*gradient(φ, x), x) ≈ ϕ(x)*laplace(φ, x) + gradient(ϕ, x)⋅gradient(φ, x) + @test divergence(x -> φ(x)*gradient(ϕ, x) - gradient(φ, x)*ϕ(x), x) ≈ 
φ(x)*laplace(ϕ, x) - laplace(φ, x)*ϕ(x) + @test laplace(x -> ϕ(x)*φ(x), x) ≈ laplace(ϕ, x)*φ(x) + 2*gradient(ϕ, x)⋅gradient(φ, x) + ϕ(x)*laplace(φ, x) + @test laplace.(x -> φ(x)*A(x), x) ≈ A(x)*laplace(φ, x) + 2*(gradient(A, x)⋅gradient(φ, x)) + φ(x)*laplace.(A, x) + @test laplace(x -> A(x)⋅B(x), x) ≈ A(x)⋅laplace.(B, x) - B(x)⋅laplace.(A, x) + 2*divergence(x -> gradient(A, x)⋅B(x) + B(x)×curl(A, x), x) + # third derivatives + @test laplace.(x -> gradient(φ, x), x) ≈ gradient(x -> divergence(x -> gradient(φ, x), x), x) ≈ gradient(x -> laplace(φ, x), x) + @test laplace(x -> divergence(A, x), x) ≈ divergence(x -> gradient(x->divergence(A, x), x), x) ≈ divergence(x -> laplace.(A, x), x) + @test laplace.(x -> curl(A, x), x) ≈ -curl(x -> curl(x -> curl(A, x), x), x) ≈ curl(x -> laplace.(A, x), x) + end # loop T + end # testsection + + @testsection "f: scalar -> scalar" begin + f(x) = 2 * x^3 + x = 3.0 + @test gradient(f, x) ≈ gradient(f, x, :all)[1] ≈ 6 * x^2 + @test gradient(f, x, :all)[2] ≈ f(x) ≈ 2 * x^3 + @test hessian(f, x) ≈ hessian(f, x, :all)[1] ≈ 12 * x + @test hessian(f, x, :all)[2] ≈ gradient(f, x) ≈ 6 * x^2 + @test hessian(f, x, :all)[3] ≈ gradient(f, x, :all)[2] ≈ 2 * x^3 + end + @testsection "f: scalar -> AbstractTensor" begin + for dim in 1:3, S in (rand(Vec{dim}), rand(Tensor{2,dim}), rand(SymmetricTensor{2,dim})) + f(x) = x^3 * S + x = 3.0 + @test gradient(f, x) ≈ gradient(f, x, :all)[1] ≈ 3 * x^2 * S + @test gradient(f, x, :all)[2] ≈ f(x) ≈ x^3 * S + @test hessian(f, x) ≈ hessian(f, x, :all)[1] ≈ 6 * x * S + @test hessian(f, x, :all)[2] ≈ gradient(f, x) ≈ 3 * x^2 * S + @test hessian(f, x, :all)[3] ≈ f(x) ≈ x^3 * S + end + end + @testsection "mixed scalar/vec" begin + f(v::Vec, s::Number) = s * v[1] * v[2] + v = Vec((2.0, 3.0)) + s = 4.0 + @test gradient(x -> f(v, x), s)::Float64 ≈ v[1] * v[2] + @test gradient(x -> f(x, s), v)::Vec{2} ≈ Vec((s * v[2], s * v[1])) + @test gradient(y -> gradient(x -> f(y, x), s), v)::Vec{2} ≈ Vec((v[2], v[1])) + @test 
gradient(y -> gradient(x -> f(x, y), v), s)::Vec{2} ≈ Vec((v[2], v[1])) + end + + + @testsection "analytical gradient implementation" begin + # Consider the function f(g(x)), we need to test the following variations to cover all cases + # * x::Number + # - g::Number, f:: Number, Vec, Tensor{2}, SymmetricTensor{2} + # - g::Vec, f:: Number, Vec + # - g::Tensor{2}, f:: Number, Tensor{2} + # - g::SymmetricTensor{2}, f:: Number, SymmetricTensor{2} + # * x::Vec + # - g::Number, f:: Number, Vec + # - g::Vec, f:: Number, Vec + # * x::Tensor{2} + # - g::Number, f:: Number, Tensor{2} + # - g::Tensor{2}, f:: Number, Tensor{2} + # * x::SymmetricTensor{2} + # - g::Number, f:: Number, SymmetricTensor{2} + # - g::SymmetricTensor{2}, f:: Number, SymmetricTensor{2} + # + # Define the different types of functions required to test the cases above. Naming convention: + # tm_tn: Function taking in tensor of order n and outputting tensor of order m. (Number=0, SymmetricTensor{2}=s) + # _ana: Suffix for the function for which the analytical derivative is implemented for + # Define this function with AbstractFloat input to give error if not properly overloaded for Dual + # as `isa(ForwardDiff.Dual(1, 1), AbstractFloat) = false` + for dim = 1:3 + + a0 = rand(); a1 = rand(Vec{dim}); a2 = rand(Tensor{2,dim}); as = rand(SymmetricTensor{2,dim}) + x0 = rand(); x1 = rand(Vec{dim}); x2 = rand(Tensor{2,dim}); xs = rand(SymmetricTensor{2,dim}) + + # Scalar input (t0) + t0_t0(x) = sin(x); t0_t0_ana(x::AbstractFloat) = t0_t0(x); d_t0_t0(x_) = reverse(gradient(t0_t0, x_, :all)); @implement_gradient t0_t0_ana d_t0_t0 + t1_t0(x) = x*a1; t1_t0_ana(x::AbstractFloat) = t1_t0(x); d_t1_t0(x_) = reverse(gradient(t1_t0, x_, :all)); @implement_gradient t1_t0_ana d_t1_t0 + t2_t0(x) = x*a2; t2_t0_ana(x::AbstractFloat) = t2_t0(x); d_t2_t0(x_) = reverse(gradient(t2_t0, x_, :all)); @implement_gradient t2_t0_ana d_t2_t0 + ts_t0(x) = x*as; ts_t0_ana(x::AbstractFloat) = ts_t0(x); d_ts_t0(x_) = reverse(gradient(ts_t0, 
x_, :all)); @implement_gradient ts_t0_ana d_ts_t0 + + @test gradient(t0_t0, x0) ≈ gradient(t0_t0_ana, x0) + @test gradient(t1_t0, x0) ≈ gradient(t1_t0_ana, x0) + @test gradient(t2_t0, x0) ≈ gradient(t2_t0_ana, x0) + @test gradient(ts_t0, x0) ≈ gradient(ts_t0_ana, x0) + + # Vector input (t1) + t0_t1(x) = norm(x); t0_t1_ana(x::Vec{<:Any,<:AbstractFloat}) = t0_t1(x); d_t0_t1(x_) = reverse(gradient(t0_t1, x_, :all)); @implement_gradient t0_t1_ana d_t0_t1 + t1_t1(x) = a0*x; t1_t1_ana(x::Vec{<:Any,<:AbstractFloat}) = t1_t1(x); d_t1_t1(x_) = reverse(gradient(t1_t1, x_, :all)); @implement_gradient t1_t1_ana d_t1_t1 + + @test gradient(t0_t1, x1) ≈ gradient(t0_t1_ana, x1) + @test gradient(t1_t1, x1) ≈ gradient(t1_t1_ana, x1) + + # 2nd order tensor input (t2) + t0_t2(x) = norm(x); t0_t2_ana(x::Tensor{2,<:Any,<:AbstractFloat}) = t0_t2(x); d_t0_t2(x_) = reverse(gradient(t0_t2, x_, :all)); @implement_gradient t0_t2_ana d_t0_t2 + t2_t2(x) = dot(x,x); t2_t2_ana(x::Tensor{2,<:Any,<:AbstractFloat}) = t2_t2(x); d_t2_t2(x_) = reverse(gradient(t2_t2, x_, :all)); @implement_gradient t2_t2_ana d_t2_t2 + + @test gradient(t0_t2, x2) ≈ gradient(t0_t2_ana, x2) + @test gradient(t2_t2, x2) ≈ gradient(t2_t2_ana, x2) + + # 2nd order symmetric tensor input (ts) + t0_ts(x) = norm(x); t0_ts_ana(x::SymmetricTensor{2,<:Any,<:AbstractFloat}) = t0_ts(x); d_t0_ts(x_) = reverse(gradient(t0_ts, x_, :all)); @implement_gradient t0_ts_ana d_t0_ts + ts_ts(x) = dot(x); ts_ts_ana(x::SymmetricTensor{2,<:Any,<:AbstractFloat}) = ts_ts(x); d_ts_ts(x_) = reverse(gradient(ts_ts, x_, :all)); @implement_gradient ts_ts_ana d_ts_ts + + @test gradient(t0_ts, xs) ≈ gradient(t0_ts_ana, xs) + @test gradient(ts_ts, xs) ≈ gradient(ts_ts_ana, xs) + + # Test combined functions. The important is that another function is called first! 
+ # f(g(x)): naming "f_g_x" + # x scalar, g scalar, f: scalar,Vec,Tensor{2},SymmetricTensor{2}: + t0_t0_t0(x) = t0_t0(t0_t0(x)); t0_t0_t0_ana(x) = t0_t0_ana(t0_t0(x)) + t1_t0_t0(x) = t1_t0(t0_t0(x)); t1_t0_t0_ana(x) = t1_t0_ana(t0_t0(x)) + t2_t0_t0(x) = t2_t0(t0_t0(x)); t2_t0_t0_ana(x) = t2_t0_ana(t0_t0(x)) + ts_t0_t0(x) = ts_t0(t0_t0(x)); ts_t0_t0_ana(x) = ts_t0_ana(t0_t0(x)) + + @test gradient(t0_t0_t0, x0) ≈ gradient(t0_t0_t0_ana, x0) + @test gradient(t1_t0_t0, x0) ≈ gradient(t1_t0_t0_ana, x0) + @test gradient(t2_t0_t0, x0) ≈ gradient(t2_t0_t0_ana, x0) + @test gradient(ts_t0_t0, x0) ≈ gradient(ts_t0_t0_ana, x0) + + # f(g(x)): x scalar, g vector, f: scalar,vector + t0_t1_t0(x) = t0_t1(t1_t0(x)); t0_t1_t0_ana(x) = t0_t1_ana(t1_t0(x)) + t1_t1_t0(x) = t1_t1(t1_t0(x)); t1_t1_t0_ana(x) = t1_t1_ana(t1_t0(x)) + + @test gradient(t0_t1_t0, x0) ≈ gradient(t0_t1_t0_ana, x0) + @test gradient(t1_t1_t0, x0) ≈ gradient(t1_t1_t0_ana, x0) + + # f(g(x)): x scalar, g Tensor{2}, f:scalar, Tensor{2} + t0_t2_t0(x) = t0_t2(t2_t0(x)); t0_t2_t0_ana(x) = t0_t2_ana(t2_t0(x)) + t2_t2_t0(x) = t2_t2(t2_t0(x)); t2_t2_t0_ana(x) = t2_t2_ana(t2_t0(x)); + + @test gradient(t0_t2_t0, x0) ≈ gradient(t0_t2_t0_ana, x0) + @test gradient(t2_t2_t0, x0) ≈ gradient(t2_t2_t0_ana, x0) + + # f(g(x)): x scalar, g SymmetricTensor{2}, f:scalar, SymmetricTensor{2} + t0_ts_t0(x) = t0_ts(ts_t0(x)); t0_ts_t0_ana(x) = t0_ts_ana(ts_t0(x)) + ts_ts_t0(x) = ts_ts(ts_t0(x)); ts_ts_t0_ana(x) = ts_ts_ana(ts_t0(x)) + + @test gradient(t0_ts_t0, x0) ≈ gradient(t0_ts_t0_ana, x0) + @test gradient(ts_ts_t0, x0) ≈ gradient(ts_ts_t0_ana, x0) + + # x Vec, g scalar, f: scalar, Vec + t0_t0_t1(x) = t0_t0(t0_t1(x)); t0_t0_t1_ana(x) = t0_t0_ana(t0_t1(x)) + t1_t0_t1(x) = t1_t0(t0_t1(x)); t1_t0_t1_ana(x) = t1_t0_ana(t0_t1(x)) + + @test gradient(t1_t0_t1, x1) ≈ gradient(t1_t0_t1_ana, x1) + @test gradient(t1_t0_t1, x1) ≈ gradient(t1_t0_t1_ana, x1) + + # x Vec, g Vec, f: scalar, Vec + t0_t1_t1(x) = t0_t1(t1_t1(x)); t0_t1_t1_ana(x) = 
t0_t1_ana(t1_t1(x)) + t1_t1_t1(x) = t1_t1(t1_t1(x)); t1_t1_t1_ana(x) = t1_t1_ana(t1_t1(x)) + + @test gradient(t0_t1_t1, x1) ≈ gradient(t0_t1_t1_ana, x1) + @test gradient(t1_t1_t1, x1) ≈ gradient(t1_t1_t1_ana, x1) + + # x Tensor{2}, g scalar, f: scalar, Tensor{2} + t0_t0_t2(x) = t0_t0(t0_t1(x)); t0_t0_t2_ana(x) = t0_t0_ana(t0_t2(x)) + t2_t0_t2(x) = t2_t0(t0_t2(x)); t2_t0_t2_ana(x) = t2_t0_ana(t0_t2(x)) + + @test gradient(t0_t0_t2, x2) ≈ gradient(t0_t0_t2_ana, x2) + @test gradient(t2_t0_t2, x2) ≈ gradient(t2_t0_t2_ana, x2) + + # x Tensor{2}, g Tensor{2}, f: scalar, Tensor{2} + t0_t2_t2(x) = t0_t2(t2_t2(x)); t0_t2_t2_ana(x) = t0_t2_ana(t2_t2(x)) + t2_t2_t2(x) = t2_t2(t2_t2(x)); t2_t2_t2_ana(x) = t2_t2_ana(t2_t2(x)) + + @test gradient(t0_t2_t2, x2) ≈ gradient(t0_t2_t2_ana, x2) + @test gradient(t2_t2_t2, x2) ≈ gradient(t2_t2_t2_ana, x2) + + # x SymmetricTensor{2}, g scalar, f: scalar, SymmetricTensor{2} + t0_t0_ts(x) = t0_t0(t0_t1(x)); t0_t0_ts_ana(x) = t0_t0_ana(t0_ts(x)) + ts_t0_ts(x) = ts_t0(t0_ts(x)); ts_t0_ts_ana(x) = ts_t0_ana(t0_ts(x)) + + @test gradient(t0_t0_ts, xs) ≈ gradient(t0_t0_ts_ana, xs) + @test gradient(ts_t0_ts, xs) ≈ gradient(ts_t0_ts_ana, xs) + + # x SymmetricTensor{2}, g SymmetricTensor{2}, f: scalar, SymmetricTensor{2} + t0_ts_ts(x) = t0_ts(ts_ts(x)); t0_ts_ts_ana(x) = t0_ts_ana(ts_ts(x)) + ts_ts_ts(x) = ts_ts(ts_ts(x)); ts_ts_ts_ana(x) = ts_ts_ana(ts_ts(x)) + + @test gradient(t0_ts_ts, xs) ≈ gradient(t0_ts_ts_ana, xs) + @test gradient(ts_ts_ts, xs) ≈ gradient(ts_ts_ts_ana, xs) + + end + + end + +end # testsection diff --git a/test/test_misc.jl b/test/test_misc.jl index 05fa1aab..be8f39f9 100644 --- a/test/test_misc.jl +++ b/test/test_misc.jl @@ -1,672 +1,672 @@ -@testsection "constructors" begin -for T in (Float32, Float64, F64), dim in (1,2,3), order in (1,2,3,4) - for op in (rand, zero, ones, randn) - # Tensor, SymmetricTensor - for TensorType in (Tensor, SymmetricTensor) - TensorType == SymmetricTensor && isodd(order) && continue - N = 
Tensors.n_components(TensorType{order, dim}) - t = (@inferred (op)(TensorType{order, dim}))::TensorType{order, dim, Float64} - t = (@inferred (op)(TensorType{order, dim, T}))::TensorType{order, dim, T} - t = (@inferred (op)(TensorType{order, dim, T, N}))::TensorType{order, dim, T} - t = (@inferred (op)(t))::TensorType{order, dim, T} - - op == zero && @test zero(TensorType{order, dim, T}) == zeros(T, size(t)) - op == ones && @test ones(TensorType{order, dim, T}) == ones(T, size(t)) - end - # Vec - if order == 1 - (@inferred (op)(Vec{dim}))::Tensor{order, dim, Float64} - (@inferred (op)(Vec{dim, T}))::Tensor{order, dim, T} - end - end - # Special Vec constructor - if order == 1 - t = ntuple(i -> T(i), dim) - @test (@inferred Vec(t))::Tensor{1,dim,T,dim} == Vec{dim}(t) - @test (@inferred Vec(t...))::Tensor{1,dim,T,dim} == Vec{dim}(t) - end - for TensorType in (Tensor, SymmetricTensor), (func, el) in ((zeros, zero), (ones, one)) - TensorType == SymmetricTensor && isodd(order) && continue - isodd(order) && func == ones && continue # one not supported for Vec's - N = Tensors.n_components(TensorType{order, dim}) - tens_arr1 = func(TensorType{order, dim}, 1) - tens_arr2 = func(TensorType{order, dim, T}, 2, 2) - tens_arr3 = func(TensorType{order, dim, T, N}, 3, 3, 3) - @test tens_arr1[1] == tens_arr2[1, 1] == tens_arr3[1, 1, 1] == el(TensorType{order, dim, T}) - @test eltype(tens_arr1) == TensorType{order, dim, Float64, N} - @test eltype(tens_arr2) == eltype(tens_arr3) == TensorType{order, dim, T, N} - end -end -for dim in (1, 2, 3) - # Heterogeneous tuple/type unstable function - z(i, jkl...) = i % 2 == 0 ? 
0 : float(0) - @test Vec{dim}(ntuple(z, dim))::Vec{dim,Float64} == - Vec{dim}(z)::Vec{dim,Float64} - @test Vec{dim,Float32}(ntuple(z, dim))::Vec{dim,Float32} == - Vec{dim,Float32}(z)::Vec{dim,Float32} - for order in (1, 2, 3, 4) - N = Tensors.n_components(Tensor{order,dim}) - @test Tensor{order,dim}(ntuple(z, N))::Tensor{order,dim,Float64} == - Tensor{order,dim}(z)::Tensor{order,dim,Float64} - @test Tensor{order,dim,Float32}(ntuple(z, N))::Tensor{order,dim,Float32} == - Tensor{order,dim,Float32}(z)::Tensor{order,dim,Float32} - @test_throws MethodError Tensor{order,dim}(ntuple(z, N+1)) - isodd(order) && continue - N = Tensors.n_components(SymmetricTensor{order,dim}) - @test SymmetricTensor{order,dim}(ntuple(z, N))::SymmetricTensor{order,dim,Float64} == - SymmetricTensor{order,dim}(z)::SymmetricTensor{order,dim,Float64} - @test SymmetricTensor{order,dim,Float32}(ntuple(z, N))::SymmetricTensor{order,dim,Float32} == - SymmetricTensor{order,dim,Float32}(z)::SymmetricTensor{order,dim,Float32} - @test_throws MethodError SymmetricTensor{order,dim}(ntuple(z, N+1)) - end -end -# Number type which is not <: Real but <: Number (Tensors#154) -@test Vec{3, NotReal}((1, 2, 3)) isa Vec{3, NotReal} -end # of testset - -@testsection "diagm, one" begin -for T in (Float32, Float64), dim in (1,2,3) - # diagm - v = rand(T, dim) - vt = (v...,) - - @test (@inferred diagm(Tensor{2, dim}, v))::Tensor{2, dim, T} == diagm(0 => v) - @test (@inferred diagm(Tensor{2, dim}, vt))::Tensor{2, dim, T} == diagm(0 => v) - @test (@inferred diagm(SymmetricTensor{2, dim}, v))::SymmetricTensor{2, dim, T} == diagm(0 => v) - @test (@inferred diagm(SymmetricTensor{2, dim}, vt))::SymmetricTensor{2, dim, T} == diagm(0 => v) - - v = rand(T); vv = v * ones(T, dim) - @test (@inferred diagm(Tensor{2, dim}, v))::Tensor{2, dim, T} == diagm(0 => vv) - @test (@inferred diagm(SymmetricTensor{2, dim}, v))::SymmetricTensor{2, dim, T} == diagm(0 => vv) - - # one - @test one(Tensor{2, dim, T}) == diagm(Tensor{2, dim}, 
one(T)) == Matrix(I, dim, dim) - @test one(SymmetricTensor{2, dim, T}) == diagm(SymmetricTensor{2, dim}, one(T)) == Matrix(I, dim, dim) - - M = 1 # dummy - @test one(Tensor{2, dim, T, M}) == one(Tensor{2, dim, T}) - @test one(SymmetricTensor{2, dim, T, M}) == one(SymmetricTensor{2, dim, T}) - - _I = (@inferred one(Tensor{2, dim, T}))::Tensor{2, dim, T} - II = (@inferred one(Tensor{4, dim, T}))::Tensor{4, dim, T} - I_sym = (@inferred one(SymmetricTensor{2, dim, T}))::SymmetricTensor{2, dim, T} - II_sym = (@inferred one(SymmetricTensor{4, dim, T}))::SymmetricTensor{4, dim, T} - for i in 1:dim, j in 1:dim - if i == j - @test _I[i,j] == T(1) - @test I_sym[i,j] == T(1) - else - @test _I[i,j] == T(0) - @test I_sym[i,j] == T(0) - end - for k in 1:dim, l in 1:dim - if i == k && j == l - @test II[i,j,k,l] == T(1) - if i == l && j == k - @test II_sym[i,j,k,l] == T(1) - else - @test II_sym[i,j,k,l] == T(1) / 2 - end - else - @test II[i,j,k,l] == T(0) - if i == l && j == k - @test II_sym[i,j,k,l] == T(1) / 2 - else - @test II_sym[i,j,k,l] == T(0) - end - end - end - end -end -end # of testset - -@testsection "base vectors" begin -for T in (Float32, Float64, F64), dim in (1,2,3) - eᵢ_func(i) = Tensor{1, dim, T}(j->j==i ? 
one(T) : zero(T)) - - a = rand(Vec{dim, T}) - for i in 1:dim - @test eᵢ(a, i) == eᵢ(typeof(a), i) == eᵢ(a)[i] == eᵢ(typeof(a))[i] == eᵢ_func(i) - end - - b = zero(a) - for i in 1:dim - @test a[i] == eᵢ(a, i) ⋅ a - b += eᵢ(a, i) * a[i] - end - @test a ≈ b -end -end # of testset - -@testsection "simple math" begin -for T in (Float32, Float64), dim in (1,2,3), order in (1,2,4), TensorType in (Tensor, SymmetricTensor) - TensorType == SymmetricTensor && isodd(order) && continue - t = rand(TensorType{order, dim, T}) - - # Binary tensor tensor: +, - - @test (@inferred t + t)::TensorType{order, dim} == Array(t) + Array(t) - @test (@inferred 2*t)::TensorType{order, dim} == 2 * Array(t) - @test (@inferred t - t)::TensorType{order, dim} == Array(t) - Array(t) - @test (@inferred 0*t)::TensorType{order, dim} == 0 * Array(t) - - # Binary tensor number: *, / - @test (@inferred 0.5 * t)::TensorType{order, dim} ≈ 0.5 * Array(t) - @test (@inferred t * 0.5)::TensorType{order, dim} ≈ Array(t) * 0.5 - @test (@inferred t / 2.0)::TensorType{order, dim} ≈ Array(t) / 2.0 - - # Unary: +, - - @test (@inferred +t)::TensorType{order, dim} == zero(t) + t - @test (@inferred -t)::TensorType{order, dim} == zero(t) - t - - if order == 2 - # Power by literal integer - fm3, fm2, fm1, f0, fp1, fp2, fp3 = t -> t^-3, t -> t^-2, t -> t^-1, t -> t^0, t -> t^1, t -> t^2, t -> t^3 - @test (@inferred fm3(t))::typeof(t) ≈ inv(t) ⋅ inv(t) ⋅ inv(t) - @test (@inferred fm2(t))::typeof(t) ≈ inv(t) ⋅ inv(t) - @test (@inferred fm1(t))::typeof(t) ≈ inv(t) - @test (@inferred f0(t))::typeof(t) ≈ one(t) - @test (@inferred fp1(t))::typeof(t) ≈ t - @test (@inferred fp2(t))::typeof(t) ≈ t ⋅ t - @test (@inferred fp3(t))::typeof(t) ≈ t ⋅ t ⋅ t - end - - @test iszero(zero(TensorType{order,dim,T})) - @test !iszero(ones(TensorType{order,dim,T})) -end -end # of testset - -@testsection "construct from function" begin -for T in (Float32, Float64, F64) - for dim in (1,2,3) - fi = (i) -> cos(i) - fij = (i,j) -> cos(i) + sin(j) - 
fijkl = (i, j, k ,l) -> cos(i) + sin(j) + tan(k) + exp(l) - - vf = (@inferred Vec{dim, T}(fi))::Tensor{1, dim, T} - af = (@inferred Tensor{1, dim, T}(fi))::Tensor{1, dim, T} - Af = (@inferred Tensor{2, dim, T}(fij))::Tensor{2, dim, T} - AAf = (@inferred Tensor{4, dim, T}(fijkl))::Tensor{4, dim, T} - Af_sym = (@inferred SymmetricTensor{2, dim, T}(fij))::SymmetricTensor{2, dim, T} - AAf_sym = (@inferred SymmetricTensor{4, dim, T}(fijkl))::SymmetricTensor{4, dim, T} - - for i in 1:dim - @test vf[i] == T(fi(i)) - @test af[i] == T(fi(i)) - for j in 1:dim - @test Af[i,j] == T(fij(i, j)) - for k in 1:dim, l in 1:dim - @test AAf[i,j,k,l] == T(fijkl(i,j,k,l)) - end - end - end - - for j in 1:dim, i in j:dim - @test Af_sym[i,j] == T(fij(i, j)) - for l in 1:dim, k in l:dim - @test AAf_sym[i,j,k,l] == T(fijkl(i,j,k,l)) - end - end - - # Test fully specified constructors - for vec in (vf,af) - @test vec == typeof(vec)(i->vec[i]) - end - for t2 in (Af, Af_sym) - @test t2 == typeof(t2)((i,j)->t2[i,j]) - end - for t4 in (AAf, AAf_sym) - @test t4 == typeof(t4)((i,j,k,l) -> t4[i,j,k,l]) - end - end -end -end # of testset - -@testsection "construct from Array" begin -for (T1, T2) in ((Float32, Float64), (Float64, Float32)), order in (1,2,4), dim in (1,2,3) - At = rand(Tensor{order, dim}) - gen_data = rand(T1, size(At)) - - @test (@inferred Tensor{order, dim}(gen_data))::Tensor{order, dim, T1} ≈ gen_data - @test (@inferred Tensor{order, dim, T2}(gen_data))::Tensor{order, dim, T2} ≈ gen_data - - if order != 1 - As = rand(SymmetricTensor{order, dim}) - gen_data = rand(T1, size(As)) - At = Tensor{order, dim, T1}(gen_data) - Ats = symmetric(At) - gen_sym_data_full = Array(Ats) - gen_sym_data = T1[Ats.data[i] for i in 1:length(Ats.data)] - - @test (@inferred SymmetricTensor{order, dim}(gen_sym_data_full))::SymmetricTensor{order, dim, T1} ≈ Ats - @test (@inferred SymmetricTensor{order, dim}(gen_sym_data))::SymmetricTensor{order, dim, T1} ≈ Ats - @test (@inferred SymmetricTensor{order, dim, 
T2}(gen_sym_data_full))::SymmetricTensor{order, dim, T2} ≈ Ats - @test (@inferred SymmetricTensor{order, dim, T2}(gen_sym_data))::SymmetricTensor{order, dim, T2} ≈ Ats - end -end -end # of testset - -@testsection "indexing" begin -for T in (Float32, Float64, F64), dim in (1,2,3), order in (1,2,4) - if order == 1 - data = rand(T, dim) - vect = Tensor{order, dim, T}(data) - for i in 1:dim+1 - if i > dim - @test_throws BoundsError vect[i] - else - @test vect[i] ≈ data[i] - end - end - @test (@inferred vect[:])::Vec{dim, T} == vect - elseif order == 2 - data = rand(T, dim, dim) - symdata = data + data' - S = Tensor{order,dim, T}(data) - Ssym = SymmetricTensor{order,dim, T}(symdata) - @test_throws ArgumentError S[:] - @test_throws ArgumentError Ssym[:] - for i in 1:dim+1, j in 1:dim+1 - if i > dim || j > dim - @test_throws BoundsError S[i, j] - @test_throws BoundsError Ssym[i, j] - else - @test S[i, j] ≈ data[i, j] - @test Ssym[i, j] ≈ symdata[i, j] - # Slice - @test (@inferred S[i,:])::Tensor{1, dim, T} ≈ data[i,:] - @test (@inferred S[:,j])::Tensor{1, dim, T} ≈ data[:,j] - @test (@inferred Ssym[i,:])::Tensor{1, dim, T} ≈ symdata[i,:] - @test (@inferred Ssym[:,j])::Tensor{1, dim, T} ≈ symdata[:,j] - end - end - elseif order == 4 - data = rand(T,dim,dim,dim,dim) - S = Tensor{order,dim, T}(data) - Ssym = symmetric(S) - symdata = Array(Ssym) - @test_throws ArgumentError S[:] - @test_throws ArgumentError Ssym[:] - for i in 1:dim+1, j in 1:dim+1, k in 1:dim+1, l in 1:dim+1 - if i > dim || j > dim || k > dim || l > dim - @test_throws BoundsError S[i, j, k, l] - @test_throws BoundsError Ssym[i, j, k, l] - else - @test S[i, j, k, l] ≈ data[i, j, k, l] - @test Ssym[i, j, k, l] ≈ symdata[i, j, k, l] - end - end - end -end -end # of testset - -@testsection "norm, trace, det, inv, eig" begin -for T in (Float32, Float64, F64), dim in (1,2,3) - # norm - for order in (1,2,4) - t = rand(Tensor{order, dim, T}) - @test (@inferred norm(t))::T ≈ sqrt(sum(abs2, Array(t)[:])) - @test 
norm((@inferred normalize(t))::Tensor{order, dim, T}) ≈ one(T) - if order != 1 - t_sym = rand(SymmetricTensor{order, dim, T}) - @test (@inferred norm(t_sym))::T ≈ sqrt(sum(abs2, Array(t_sym)[:])) - @test norm((@inferred normalize(t_sym))::SymmetricTensor{order, dim, T}) ≈ one(T) - end - end - - # trace, vol, dev, det, inv (only for second order tensors) - t = rand(Tensor{2, dim, T}) - t_sym = rand(SymmetricTensor{2, dim, T}) - - @test (@inferred tr(t))::T == sum([t[i,i] for i in 1:dim]) - @test (@inferred tr(t_sym))::T == sum([t_sym[i,i] for i in 1:dim]) - - @test tr(t) ≈ mean(t)*3.0 - @test tr(t_sym) ≈ mean(t_sym)*3.0 - - @test (@inferred vol(t))::Tensor{2, dim, T} ≈ mean(t) * Matrix(I, dim, dim) - @test (@inferred vol(t_sym))::SymmetricTensor{2, dim, T} ≈ mean(t_sym) * Matrix(I, dim, dim) - - @test (@inferred dev(t))::Tensor{2, dim, T} ≈ Array(t) - 1/3*tr(t)* Matrix(I, dim, dim) - @test (@inferred dev(t_sym))::SymmetricTensor{2, dim, T} ≈ Array(t_sym) - 1/3*tr(t_sym)* Matrix(I, dim, dim) - - @test (@inferred det(t))::T ≈ det(Array(t)) - @test (@inferred det(t_sym))::T ≈ det(Array(t_sym)) - - @test (@inferred inv(t))::Tensor{2, dim, T} ≈ inv(Array(t)) - @test (@inferred inv(t_sym))::SymmetricTensor{2, dim, T} ≈ inv(Array(t_sym)) - - # inv for fourth order tensors - Random.seed!(1234) - AA = rand(Tensor{4, dim, T}) - AA_sym = rand(SymmetricTensor{4, dim, T}) - @test AA ⊡ (@inferred inv(AA))::Tensor{4, dim, T} ≈ one(Tensor{4, dim, T}) - @test AA_sym ⊡ (@inferred inv(AA_sym))::SymmetricTensor{4, dim, T} ≈ one(SymmetricTensor{4, dim, T}) - - E = @inferred eigen(t_sym) - Λ, Φ = E - Λa, Φa = eigen(Array(t_sym)) - - @test Λ ≈ (@inferred eigvals(t_sym)) ≈ eigvals(E) ≈ Λa - @test Φ ≈ (@inferred eigvecs(t_sym)) ≈ eigvecs(E) - for i in 1:dim - # scale with first element of eigenvector to account for possible directions - @test Φ[:, i]*Φ[1, i] ≈ Φa[:, i]*Φa[1, i] - end - - # test eigenfactorizations for a diagonal tensor - v = rand(T, dim) - d_sym = diagm(SymmetricTensor{2, 
dim, T}, v) - E = @inferred eigen(d_sym) - Λ, Φ = E - Λa, Φa = eigen(Symmetric(Array(d_sym))) - - @test Λ ≈ (@inferred eigvals(d_sym)) ≈ eigvals(E) ≈ Λa - @test Φ ≈ (@inferred eigvecs(d_sym)) ≈ eigvecs(E) - - # sqrt - Apd = tdot(t_sym) - @test sqrt(Apd) ⋅ sqrt(Apd) ≈ Apd -end -end # of testset - -@testsection "eigen(::FourthOrderTensor)" begin -for T in (Float32, Float64), dim in (1, 2, 3) - Random.seed!(123) - # construct positive definite Voigt-tensor - n = dim*dim - div((dim-1)*dim, 2) - A = rand(T, n, n); A = A'A + I - Aval, Avec = eigen(Hermitian(A)) - perm = sortperm(Aval) - Aval = Aval[perm] - Avec = [Avec[:, i] for i in perm] - - S = frommandel(SymmetricTensor{4,dim,T}, A) - - E = eigen(S) - @test eigvals(E) ≈ Aval - S′ = zero(S) - for i in 1:n - m = tomandel(eigvecs(E)[i]) - @test m / m[1] ≈ Avec[i] / Avec[i][1] - @test S ⊡ E.vectors[i] ≈ E.values[i] * E.vectors[i] - @test norm(E.vectors[i]) ≈ 1 - for j in 1:n - if i == j - @test E.vectors[i] ⊡ E.vectors[j] ≈ 1 - else - @test E.vectors[i] ⊡ E.vectors[j] ≈ 0 atol=10eps(T) - end - end - S′ += E.values[i] * (E.vectors[i] ⊗ E.vectors[i]) - end - @test S ≈ S′ - a, b = E # iteration - @test a == eigvals(E) == E.values - @test b == eigvecs(E) == E.vectors -end -end - -# https://en.wikiversity.org/wiki/Continuum_mechanics/Tensor_algebra_identities -@testsection "tensor identities" begin -for T in (Float32, Float64, F64) - for dim in (1,2,3) - # Identities with second order and first order - A = rand(Tensor{2, dim, T}) - B = rand(Tensor{2, dim, T}) - C = rand(Tensor{2, dim, T}) - I = one(Tensor{2, dim, T}) - a = rand(Tensor{1, dim, T}) - b = rand(Tensor{1, dim, T}) - - @test A ⊡ B ≈ (A' ⋅ B) ⊡ one(A) - @test A ⊡ (a ⊗ b) ≈ (A ⋅ b) ⋅ a - @test (A ⋅ a) ⋅ (B ⋅ b) ≈ (A' ⋅ B) ⊡ (a ⊗ b) - @test (A ⋅ a) ⊗ b ≈ A ⋅ (a ⊗ b) - @test a ⊗ (A ⋅ b) ≈ (A ⋅ (b ⊗ a))' - @test a ⊗ (A ⋅ b) ≈ (a ⊗ b) ⋅ A' - - @test A ⊡ I ≈ tr(A) - @test det(A) ≈ det(A') - @test tr(inv(A) ⋅ A) ≈ dim - @test inv(A) ⋅ A ≈ A \ A ≈ I - @test inv(A) ⋅ a ≈ A \ 
a - - @test (I ⊗ I) ⊡ A ≈ tr(A) * I - @test (I ⊗ I) ⊡ A ⊡ A ≈ tr(A)^2 - - @test A ⋅ a ≈ a ⋅ A' - - A_sym = rand(SymmetricTensor{2, dim}) - B_sym = rand(SymmetricTensor{2, dim}) - C_sym = rand(SymmetricTensor{2, dim}) - I_sym = one(SymmetricTensor{2, dim}) - - @test A_sym ⊡ I_sym ≈ tr(A_sym) - @test det(A_sym) ≈ det(A_sym') - - @test (I_sym ⊗ I_sym) ⊡ A_sym ≈ tr(A_sym) * I_sym - @test ((I_sym ⊗ I_sym) ⊡ A_sym) ⊡ A_sym ≈ tr(A_sym)^2 - end -end - -for T in (Float32, Float64, F64) - for dim in (1,2,3) - # Identities with identity tensor - II = one(Tensor{4, dim, T}) - I = one(Tensor{2, dim, T}) - AA = rand(Tensor{4, dim, T}) - A = rand(Tensor{2, dim, T}) - II_sym = one(SymmetricTensor{4, dim, T}) - I_sym = one(SymmetricTensor{2, dim, T}) - AA_sym = rand(SymmetricTensor{4, dim, T}) - A_sym = rand(SymmetricTensor{2, dim, T}) - - @test II ⊡ AA ≈ AA - @test AA ⊡ II ≈ AA - @test II ⊡ A ≈ A - @test A ⊡ II ≈ A - @test II ⊡ A ⊡ A ≈ (tr(A' ⋅ A)) - - @test II ⊡ AA_sym ≈ AA_sym - @test AA_sym ⊡ II ≈ AA_sym - @test II ⊡ A_sym ≈ A_sym - @test A_sym ⊡ II ≈ A_sym - @test II ⊡ A_sym ⊡ A_sym ≈ (tr(A_sym' ⋅ A_sym)) - - @test II_sym ⊡ AA_sym ≈ AA_sym - @test AA_sym ⊡ II_sym ≈ AA_sym - @test II_sym ⊡ A_sym ≈ A_sym - @test A_sym ⊡ II_sym ≈ A_sym - @test II_sym ⊡ A_sym ⊡ A_sym ≈ (tr(A_sym' ⋅ A_sym)) - end -end - -for T in (Float64,) - for dim in 1:3 - # Tested operations - # S2 ⋅ S3, S3 ⋅ S2 - # S1 ⊗ S2, S2 ⊗ S1 - # S3 ⊡ S2, S2 ⊡ S3 - - # Identities involving 3rd order tensors - I2 = one(Tensor{2,dim,T}) - A2 = rand(Tensor{2,dim,T}) - S2 = rand(Tensor{2,dim,T}) - S2s = rand(SymmetricTensor{2,dim,T}) - S2sr = Tensor{2,dim,T}(S2s) - v = rand(Vec{dim,T}) - u = rand(Vec{dim,T}) - @test (u⊗I2)⋅v ≈ (u⊗v) - @test u⋅(I2⊗v) ≈ (u⊗v) - @test I2⋅(v⊗S2) ≈ (v⊗S2) - @test v ⊗ (S2⋅A2) ≈ (v ⊗ S2)⋅A2 - @test v ⊗ S2s ≈ v ⊗ S2sr - @test S2s ⊗ v ≈ S2sr ⊗ v - @test (v⊗S2)⊡A2 ≈ v*(S2⊡A2) - @test (v⊗S2)⊡S2s ≈ v*(S2⊡S2s) - @test A2⊡(S2⊗v) ≈ v*(S2⊡A2) - @test S2s⊡(S2⊗v) ≈ v*(S2⊡S2s) - end -end -end # of testset - 
-@testsection "promotion/conversion" begin -T = Float32 -WIDE_T = widen(T) -for dim in (1,2,3), order in (1,2,4) - - tens = Tensor{order, dim, T, dim^order} - tens_wide = Tensor{order, dim, WIDE_T, dim^order} - - @test promote_type(tens, tens) == tens - @test promote_type(tens_wide, tens) == tens_wide - @test promote_type(tens, tens_wide) == tens_wide - @test promote_type(tens_wide, tens_wide) == tens_wide - - At = rand(tens) - Bt = rand(tens_wide) - @test isa((@inferred At + Bt), tens_wide) - @test isa((@inferred convert(Tensor, At)), tens) - @test isa((@inferred convert(Tensor{order, dim, T}, At)), tens) - @test isa((@inferred convert(tens, At)), tens) - @test isa((@inferred convert(Tensor{order, dim, WIDE_T}, At)), tens_wide) - @test isa((@inferred convert(tens_wide, At)), tens_wide) - - if order != 1 - M = Tensors.n_components(SymmetricTensor{order, dim}) - sym = SymmetricTensor{order, dim, T, M} - sym_wide = SymmetricTensor{order, dim, WIDE_T, M} - - @test promote_type(sym, sym) == sym - @test promote_type(sym_wide, sym) == sym_wide - @test promote_type(sym, sym_wide) == sym_wide - @test promote_type(sym_wide, sym_wide) == sym_wide - - @test promote_type(sym, tens) == tens - @test promote_type(tens, sym) == tens - @test promote_type(sym_wide, tens) == tens_wide - @test promote_type(tens, sym_wide) == tens_wide - @test promote_type(sym, tens_wide) == tens_wide - @test promote_type(tens_wide, sym) == tens_wide - @test promote_type(sym_wide, tens_wide) == tens_wide - - As = rand(sym) - Bs = rand(sym_wide) - @test isa((@inferred As + Bs), sym_wide) - - @test isa((@inferred convert(SymmetricTensor, As)), sym) - @test isa((@inferred convert(SymmetricTensor{order, dim, T}, As)), sym) - @test isa((@inferred convert(sym, As)), sym) - @test isa((@inferred convert(SymmetricTensor{order, dim, WIDE_T}, As)), sym_wide) - @test isa((@inferred convert(sym_wide, As)), sym_wide) - - # SymmetricTensor -> Tensor - @test (@inferred convert(Tensor, As))::tens ≈ As ≈ Array(As) - 
@test (@inferred convert(Tensor{order, dim}, As))::tens ≈ As ≈ Array(As) - @test (@inferred convert(Tensor{order, dim, WIDE_T}, As))::tens_wide ≈ As ≈ Array(As) - @test (@inferred convert(tens_wide, As))::tens_wide ≈ As ≈ Array(As) - - # Tensor -> SymmetricTensor - if dim != 1 - @test_throws InexactError convert(SymmetricTensor, At) - @test_throws InexactError convert(SymmetricTensor{order, dim}, At) - @test_throws InexactError convert(SymmetricTensor{order, dim, WIDE_T}, At) - @test_throws InexactError convert(typeof(Bs), At) - end - end -end -end # of testset - -using Unitful -@testset "unitful" begin; for dim in 1:3 - A = rand(Tensor{2,dim}) - As = rand(SymmetricTensor{2,dim}) - @test one(A * 1u"Pa")::Tensor{2,dim,Float64} == one(A)::Tensor{2,dim,Float64} - @test one(As * 1u"Pa")::SymmetricTensor{2,dim,Float64} == one(As)::SymmetricTensor{2,dim,Float64} - - # Eigen of 2nd order - M = rand(dim, dim) + 5I; M = M'M - S = SymmetricTensor{2,dim}(M) - SU = SymmetricTensor{2,dim}(M * 1u"Pa") - ST = convert(Tensor, S) - SUT = convert(Tensor, SU) - @test eigvals(Hermitian(M)) ≈ eigvals(S) ≈ ustrip.(eigvals(SU)) - same_no_sign(args...) 
= all(args) do arg; all(1:dim) do i - args[1][:, i] ≈ arg[:, i] || args[1][:, i] ≈ -arg[:, i] - end end - @test same_no_sign(eigvecs(Hermitian(M)), eigvecs(S), ustrip.(eigvecs(SU))) - @test eltype(eigvals(SU)) == typeof(1.0u"Pa") - @test eltype(eigvecs(SU)) == typeof(one(1.0u"Pa")) - # inv - @test inv(M) ≈ inv(S) ≈ inv(ST) ≈ - ustrip.(inv(SU)) ≈ ustrip.(inv(SUT)) - # (to|from)(voigt|mandel) - @test fromvoigt(SymmetricTensor{2,dim}, tovoigt(SU)) == SU - @test fromvoigt(SymmetricTensor{2,dim}, tovoigt(SU; offdiagscale=5.0); offdiagscale=5.0) ≈ SU - @test frommandel(SymmetricTensor{2,dim}, tomandel(SU)) ≈ SU - @test fromvoigt(Tensor{2,dim}, tovoigt(SUT)) == SUT - # Eigen of 4th order - M = rand(dim+dim, dim+dim) + 5I; M = M'M - S = fromvoigt(SymmetricTensor{4,dim}, M) - SU = fromvoigt(SymmetricTensor{4,dim}, M * 1u"Pa") - @test eigvals(S) ≈ ustrip.(eigvals(SU)) - @test eigvecs(S) ≈ eigvecs(SU) - @test eltype(eigvals(SU)) == typeof(1.0u"Pa") - @test eltype(eigvecs(SU)[1]) == typeof(one(1.0u"Pa")) - # inv - @test inv(S) ⊡ S ≈ inv(SU) ⊡ SU ≈ one(S) - M = rand(dim^2, dim^2) + 5I; M = M'M - ST = fromvoigt(Tensor{4,dim}, M) - SUT = fromvoigt(Tensor{4,dim}, M * 1u"Pa") - @test inv(ST) ⊡ ST ≈ inv(SUT) ⊡ SUT ≈ one(ST) - # (to|from)(voigt|mandel) - @test fromvoigt(SymmetricTensor{4,dim}, tovoigt(SU)) == SU - @test fromvoigt(SymmetricTensor{4,dim}, tovoigt(SU; offdiagscale=5.0); offdiagscale=5.0) ≈ SU - @test frommandel(SymmetricTensor{4,dim}, tomandel(SU)) ≈ SU - @test fromvoigt(Tensor{4,dim}, tovoigt(SUT)) == SUT -end end - -@testset "issues #100, #101: orders of eigenvectors" begin - for (i, j) in ((1, 2), (2, 1)) - S = SymmetricTensor{2,2,Float64}((i, 0, j)) - Λ = diagm(SymmetricTensor{2,2}, eigvals(S)) - Φ = eigvecs(S) - @test Φ ⋅ Λ ⋅ Φ' ≈ S - end - for (i, j, k) in ((1, 2, 3), (1, 3, 2), (2, 1, 3), (2, 3, 1), (3, 1, 2), (3, 2, 1)) - S = SymmetricTensor{2,3,Float64}((i, 0, 0, j, 0, k)) - Λ = diagm(SymmetricTensor{2,3}, eigvals(S)) - Φ = eigvecs(S) - @test Φ ⋅ Λ ⋅ Φ' ≈ S - 
end -end - -@testset "issue #166: NaN in eigenvectors" begin - M = [ 50.0 -10.0 0.0 - -10.0 40.0 0.0 - 0.0 0.0 0.0] - ME = eigen(Hermitian(M)) - S = SymmetricTensor{2,3}(M) - SE = eigen(S) - @test eigvals(SE) ≈ eigvals(ME) - @test eigvecs(SE) ≈ eigvecs(ME) -end - -@testsection "exceptions" begin - # normal multiplication - A = rand(Tensor{2, 3}) - B = rand(Tensor{2, 3}) - @test_throws Exception A*B - @test_throws Exception A'*B - - AA = rand(Tensor{4, 2}) - A2 = rand(Tensor{2, 2}) - @test_throws DimensionMismatch A + A2 - @test_throws DimensionMismatch AA - A - - # transpose/adjoint of Vec - x = rand(Vec{3}) - @test_throws ArgumentError x' - @test_throws ArgumentError transpose(x) - @test_throws ArgumentError adjoint(x) -end # of testset +@testsection "constructors" begin +for T in (Float32, Float64, F64), dim in (1,2,3), order in (1,2,3,4) + for op in (rand, zero, ones, randn) + # Tensor, SymmetricTensor + for TensorType in (Tensor, SymmetricTensor) + TensorType == SymmetricTensor && isodd(order) && continue + N = Tensors.n_components(TensorType{order, dim}) + t = (@inferred (op)(TensorType{order, dim}))::TensorType{order, dim, Float64} + t = (@inferred (op)(TensorType{order, dim, T}))::TensorType{order, dim, T} + t = (@inferred (op)(TensorType{order, dim, T, N}))::TensorType{order, dim, T} + t = (@inferred (op)(t))::TensorType{order, dim, T} + + op == zero && @test zero(TensorType{order, dim, T}) == zeros(T, size(t)) + op == ones && @test ones(TensorType{order, dim, T}) == ones(T, size(t)) + end + # Vec + if order == 1 + (@inferred (op)(Vec{dim}))::Tensor{order, dim, Float64} + (@inferred (op)(Vec{dim, T}))::Tensor{order, dim, T} + end + end + # Special Vec constructor + if order == 1 + t = ntuple(i -> T(i), dim) + @test (@inferred Vec(t))::Tensor{1,dim,T,dim} == Vec{dim}(t) + @test (@inferred Vec(t...))::Tensor{1,dim,T,dim} == Vec{dim}(t) + end + for TensorType in (Tensor, SymmetricTensor), (func, el) in ((zeros, zero), (ones, one)) + TensorType == 
SymmetricTensor && isodd(order) && continue + isodd(order) && func == ones && continue # one not supported for Vec's + N = Tensors.n_components(TensorType{order, dim}) + tens_arr1 = func(TensorType{order, dim}, 1) + tens_arr2 = func(TensorType{order, dim, T}, 2, 2) + tens_arr3 = func(TensorType{order, dim, T, N}, 3, 3, 3) + @test tens_arr1[1] == tens_arr2[1, 1] == tens_arr3[1, 1, 1] == el(TensorType{order, dim, T}) + @test eltype(tens_arr1) == TensorType{order, dim, Float64, N} + @test eltype(tens_arr2) == eltype(tens_arr3) == TensorType{order, dim, T, N} + end +end +for dim in (1, 2, 3) + # Heterogeneous tuple/type unstable function + z(i, jkl...) = i % 2 == 0 ? 0 : float(0) + @test Vec{dim}(ntuple(z, dim))::Vec{dim,Float64} == + Vec{dim}(z)::Vec{dim,Float64} + @test Vec{dim,Float32}(ntuple(z, dim))::Vec{dim,Float32} == + Vec{dim,Float32}(z)::Vec{dim,Float32} + for order in (1, 2, 3, 4) + N = Tensors.n_components(Tensor{order,dim}) + @test Tensor{order,dim}(ntuple(z, N))::Tensor{order,dim,Float64} == + Tensor{order,dim}(z)::Tensor{order,dim,Float64} + @test Tensor{order,dim,Float32}(ntuple(z, N))::Tensor{order,dim,Float32} == + Tensor{order,dim,Float32}(z)::Tensor{order,dim,Float32} + @test_throws MethodError Tensor{order,dim}(ntuple(z, N+1)) + isodd(order) && continue + N = Tensors.n_components(SymmetricTensor{order,dim}) + @test SymmetricTensor{order,dim}(ntuple(z, N))::SymmetricTensor{order,dim,Float64} == + SymmetricTensor{order,dim}(z)::SymmetricTensor{order,dim,Float64} + @test SymmetricTensor{order,dim,Float32}(ntuple(z, N))::SymmetricTensor{order,dim,Float32} == + SymmetricTensor{order,dim,Float32}(z)::SymmetricTensor{order,dim,Float32} + @test_throws MethodError SymmetricTensor{order,dim}(ntuple(z, N+1)) + end +end +# Number type which is not <: Real but <: Number (Tensors#154) +@test Vec{3, NotReal}((1, 2, 3)) isa Vec{3, NotReal} +end # of testset + +@testsection "diagm, one" begin +for T in (Float32, Float64), dim in (1,2,3) + # diagm + v = rand(T, dim) 
+ vt = (v...,) + + @test (@inferred diagm(Tensor{2, dim}, v))::Tensor{2, dim, T} == diagm(0 => v) + @test (@inferred diagm(Tensor{2, dim}, vt))::Tensor{2, dim, T} == diagm(0 => v) + @test (@inferred diagm(SymmetricTensor{2, dim}, v))::SymmetricTensor{2, dim, T} == diagm(0 => v) + @test (@inferred diagm(SymmetricTensor{2, dim}, vt))::SymmetricTensor{2, dim, T} == diagm(0 => v) + + v = rand(T); vv = v * ones(T, dim) + @test (@inferred diagm(Tensor{2, dim}, v))::Tensor{2, dim, T} == diagm(0 => vv) + @test (@inferred diagm(SymmetricTensor{2, dim}, v))::SymmetricTensor{2, dim, T} == diagm(0 => vv) + + # one + @test one(Tensor{2, dim, T}) == diagm(Tensor{2, dim}, one(T)) == Matrix(I, dim, dim) + @test one(SymmetricTensor{2, dim, T}) == diagm(SymmetricTensor{2, dim}, one(T)) == Matrix(I, dim, dim) + + M = 1 # dummy + @test one(Tensor{2, dim, T, M}) == one(Tensor{2, dim, T}) + @test one(SymmetricTensor{2, dim, T, M}) == one(SymmetricTensor{2, dim, T}) + + _I = (@inferred one(Tensor{2, dim, T}))::Tensor{2, dim, T} + II = (@inferred one(Tensor{4, dim, T}))::Tensor{4, dim, T} + I_sym = (@inferred one(SymmetricTensor{2, dim, T}))::SymmetricTensor{2, dim, T} + II_sym = (@inferred one(SymmetricTensor{4, dim, T}))::SymmetricTensor{4, dim, T} + for i in 1:dim, j in 1:dim + if i == j + @test _I[i,j] == T(1) + @test I_sym[i,j] == T(1) + else + @test _I[i,j] == T(0) + @test I_sym[i,j] == T(0) + end + for k in 1:dim, l in 1:dim + if i == k && j == l + @test II[i,j,k,l] == T(1) + if i == l && j == k + @test II_sym[i,j,k,l] == T(1) + else + @test II_sym[i,j,k,l] == T(1) / 2 + end + else + @test II[i,j,k,l] == T(0) + if i == l && j == k + @test II_sym[i,j,k,l] == T(1) / 2 + else + @test II_sym[i,j,k,l] == T(0) + end + end + end + end +end +end # of testset + +@testsection "base vectors" begin +for T in (Float32, Float64, F64), dim in (1,2,3) + eᵢ_func(i) = Tensor{1, dim, T}(j->j==i ? 
one(T) : zero(T)) + + a = rand(Vec{dim, T}) + for i in 1:dim + @test eᵢ(a, i) == eᵢ(typeof(a), i) == eᵢ(a)[i] == eᵢ(typeof(a))[i] == eᵢ_func(i) + end + + b = zero(a) + for i in 1:dim + @test a[i] == eᵢ(a, i) ⋅ a + b += eᵢ(a, i) * a[i] + end + @test a ≈ b +end +end # of testset + +@testsection "simple math" begin +for T in (Float32, Float64), dim in (1,2,3), order in (1,2,4), TensorType in (Tensor, SymmetricTensor) + TensorType == SymmetricTensor && isodd(order) && continue + t = rand(TensorType{order, dim, T}) + + # Binary tensor tensor: +, - + @test (@inferred t + t)::TensorType{order, dim} == Array(t) + Array(t) + @test (@inferred 2*t)::TensorType{order, dim} == 2 * Array(t) + @test (@inferred t - t)::TensorType{order, dim} == Array(t) - Array(t) + @test (@inferred 0*t)::TensorType{order, dim} == 0 * Array(t) + + # Binary tensor number: *, / + @test (@inferred 0.5 * t)::TensorType{order, dim} ≈ 0.5 * Array(t) + @test (@inferred t * 0.5)::TensorType{order, dim} ≈ Array(t) * 0.5 + @test (@inferred t / 2.0)::TensorType{order, dim} ≈ Array(t) / 2.0 + + # Unary: +, - + @test (@inferred +t)::TensorType{order, dim} == zero(t) + t + @test (@inferred -t)::TensorType{order, dim} == zero(t) - t + + if order == 2 + # Power by literal integer + fm3, fm2, fm1, f0, fp1, fp2, fp3 = t -> t^-3, t -> t^-2, t -> t^-1, t -> t^0, t -> t^1, t -> t^2, t -> t^3 + @test (@inferred fm3(t))::typeof(t) ≈ inv(t) ⋅ inv(t) ⋅ inv(t) + @test (@inferred fm2(t))::typeof(t) ≈ inv(t) ⋅ inv(t) + @test (@inferred fm1(t))::typeof(t) ≈ inv(t) + @test (@inferred f0(t))::typeof(t) ≈ one(t) + @test (@inferred fp1(t))::typeof(t) ≈ t + @test (@inferred fp2(t))::typeof(t) ≈ t ⋅ t + @test (@inferred fp3(t))::typeof(t) ≈ t ⋅ t ⋅ t + end + + @test iszero(zero(TensorType{order,dim,T})) + @test !iszero(ones(TensorType{order,dim,T})) +end +end # of testset + +@testsection "construct from function" begin +for T in (Float32, Float64, F64) + for dim in (1,2,3) + fi = (i) -> cos(i) + fij = (i,j) -> cos(i) + sin(j) + 
fijkl = (i, j, k ,l) -> cos(i) + sin(j) + tan(k) + exp(l) + + vf = (@inferred Vec{dim, T}(fi))::Tensor{1, dim, T} + af = (@inferred Tensor{1, dim, T}(fi))::Tensor{1, dim, T} + Af = (@inferred Tensor{2, dim, T}(fij))::Tensor{2, dim, T} + AAf = (@inferred Tensor{4, dim, T}(fijkl))::Tensor{4, dim, T} + Af_sym = (@inferred SymmetricTensor{2, dim, T}(fij))::SymmetricTensor{2, dim, T} + AAf_sym = (@inferred SymmetricTensor{4, dim, T}(fijkl))::SymmetricTensor{4, dim, T} + + for i in 1:dim + @test vf[i] == T(fi(i)) + @test af[i] == T(fi(i)) + for j in 1:dim + @test Af[i,j] == T(fij(i, j)) + for k in 1:dim, l in 1:dim + @test AAf[i,j,k,l] == T(fijkl(i,j,k,l)) + end + end + end + + for j in 1:dim, i in j:dim + @test Af_sym[i,j] == T(fij(i, j)) + for l in 1:dim, k in l:dim + @test AAf_sym[i,j,k,l] == T(fijkl(i,j,k,l)) + end + end + + # Test fully specified constructors + for vec in (vf,af) + @test vec == typeof(vec)(i->vec[i]) + end + for t2 in (Af, Af_sym) + @test t2 == typeof(t2)((i,j)->t2[i,j]) + end + for t4 in (AAf, AAf_sym) + @test t4 == typeof(t4)((i,j,k,l) -> t4[i,j,k,l]) + end + end +end +end # of testset + +@testsection "construct from Array" begin +for (T1, T2) in ((Float32, Float64), (Float64, Float32)), order in (1,2,4), dim in (1,2,3) + At = rand(Tensor{order, dim}) + gen_data = rand(T1, size(At)) + + @test (@inferred Tensor{order, dim}(gen_data))::Tensor{order, dim, T1} ≈ gen_data + @test (@inferred Tensor{order, dim, T2}(gen_data))::Tensor{order, dim, T2} ≈ gen_data + + if order != 1 + As = rand(SymmetricTensor{order, dim}) + gen_data = rand(T1, size(As)) + At = Tensor{order, dim, T1}(gen_data) + Ats = symmetric(At) + gen_sym_data_full = Array(Ats) + gen_sym_data = T1[Ats.data[i] for i in 1:length(Ats.data)] + + @test (@inferred SymmetricTensor{order, dim}(gen_sym_data_full))::SymmetricTensor{order, dim, T1} ≈ Ats + @test (@inferred SymmetricTensor{order, dim}(gen_sym_data))::SymmetricTensor{order, dim, T1} ≈ Ats + @test (@inferred SymmetricTensor{order, dim, 
T2}(gen_sym_data_full))::SymmetricTensor{order, dim, T2} ≈ Ats + @test (@inferred SymmetricTensor{order, dim, T2}(gen_sym_data))::SymmetricTensor{order, dim, T2} ≈ Ats + end +end +end # of testset + +@testsection "indexing" begin +for T in (Float32, Float64, F64), dim in (1,2,3), order in (1,2,4) + if order == 1 + data = rand(T, dim) + vect = Tensor{order, dim, T}(data) + for i in 1:dim+1 + if i > dim + @test_throws BoundsError vect[i] + else + @test vect[i] ≈ data[i] + end + end + @test (@inferred vect[:])::Vec{dim, T} == vect + elseif order == 2 + data = rand(T, dim, dim) + symdata = data + data' + S = Tensor{order,dim, T}(data) + Ssym = SymmetricTensor{order,dim, T}(symdata) + @test_throws ArgumentError S[:] + @test_throws ArgumentError Ssym[:] + for i in 1:dim+1, j in 1:dim+1 + if i > dim || j > dim + @test_throws BoundsError S[i, j] + @test_throws BoundsError Ssym[i, j] + else + @test S[i, j] ≈ data[i, j] + @test Ssym[i, j] ≈ symdata[i, j] + # Slice + @test (@inferred S[i,:])::Tensor{1, dim, T} ≈ data[i,:] + @test (@inferred S[:,j])::Tensor{1, dim, T} ≈ data[:,j] + @test (@inferred Ssym[i,:])::Tensor{1, dim, T} ≈ symdata[i,:] + @test (@inferred Ssym[:,j])::Tensor{1, dim, T} ≈ symdata[:,j] + end + end + elseif order == 4 + data = rand(T,dim,dim,dim,dim) + S = Tensor{order,dim, T}(data) + Ssym = symmetric(S) + symdata = Array(Ssym) + @test_throws ArgumentError S[:] + @test_throws ArgumentError Ssym[:] + for i in 1:dim+1, j in 1:dim+1, k in 1:dim+1, l in 1:dim+1 + if i > dim || j > dim || k > dim || l > dim + @test_throws BoundsError S[i, j, k, l] + @test_throws BoundsError Ssym[i, j, k, l] + else + @test S[i, j, k, l] ≈ data[i, j, k, l] + @test Ssym[i, j, k, l] ≈ symdata[i, j, k, l] + end + end + end +end +end # of testset + +@testsection "norm, trace, det, inv, eig" begin +for T in (Float32, Float64, F64), dim in (1,2,3) + # norm + for order in (1,2,4) + t = rand(Tensor{order, dim, T}) + @test (@inferred norm(t))::T ≈ sqrt(sum(abs2, Array(t)[:])) + @test 
norm((@inferred normalize(t))::Tensor{order, dim, T}) ≈ one(T) + if order != 1 + t_sym = rand(SymmetricTensor{order, dim, T}) + @test (@inferred norm(t_sym))::T ≈ sqrt(sum(abs2, Array(t_sym)[:])) + @test norm((@inferred normalize(t_sym))::SymmetricTensor{order, dim, T}) ≈ one(T) + end + end + + # trace, vol, dev, det, inv (only for second order tensors) + t = rand(Tensor{2, dim, T}) + t_sym = rand(SymmetricTensor{2, dim, T}) + + @test (@inferred tr(t))::T == sum([t[i,i] for i in 1:dim]) + @test (@inferred tr(t_sym))::T == sum([t_sym[i,i] for i in 1:dim]) + + @test tr(t) ≈ mean(t)*3.0 + @test tr(t_sym) ≈ mean(t_sym)*3.0 + + @test (@inferred vol(t))::Tensor{2, dim, T} ≈ mean(t) * Matrix(I, dim, dim) + @test (@inferred vol(t_sym))::SymmetricTensor{2, dim, T} ≈ mean(t_sym) * Matrix(I, dim, dim) + + @test (@inferred dev(t))::Tensor{2, dim, T} ≈ Array(t) - 1/3*tr(t)* Matrix(I, dim, dim) + @test (@inferred dev(t_sym))::SymmetricTensor{2, dim, T} ≈ Array(t_sym) - 1/3*tr(t_sym)* Matrix(I, dim, dim) + + @test (@inferred det(t))::T ≈ det(Array(t)) + @test (@inferred det(t_sym))::T ≈ det(Array(t_sym)) + + @test (@inferred inv(t))::Tensor{2, dim, T} ≈ inv(Array(t)) + @test (@inferred inv(t_sym))::SymmetricTensor{2, dim, T} ≈ inv(Array(t_sym)) + + # inv for fourth order tensors + Random.seed!(1234) + AA = rand(Tensor{4, dim, T}) + AA_sym = rand(SymmetricTensor{4, dim, T}) + @test AA ⊡ (@inferred inv(AA))::Tensor{4, dim, T} ≈ one(Tensor{4, dim, T}) + @test AA_sym ⊡ (@inferred inv(AA_sym))::SymmetricTensor{4, dim, T} ≈ one(SymmetricTensor{4, dim, T}) + + E = @inferred eigen(t_sym) + Λ, Φ = E + Λa, Φa = eigen(Array(t_sym)) + + @test Λ ≈ (@inferred eigvals(t_sym)) ≈ eigvals(E) ≈ Λa + @test Φ ≈ (@inferred eigvecs(t_sym)) ≈ eigvecs(E) + for i in 1:dim + # scale with first element of eigenvector to account for possible directions + @test Φ[:, i]*Φ[1, i] ≈ Φa[:, i]*Φa[1, i] + end + + # test eigenfactorizations for a diagonal tensor + v = rand(T, dim) + d_sym = diagm(SymmetricTensor{2, 
dim, T}, v) + E = @inferred eigen(d_sym) + Λ, Φ = E + Λa, Φa = eigen(Symmetric(Array(d_sym))) + + @test Λ ≈ (@inferred eigvals(d_sym)) ≈ eigvals(E) ≈ Λa + @test Φ ≈ (@inferred eigvecs(d_sym)) ≈ eigvecs(E) + + # sqrt + Apd = tdot(t_sym) + @test sqrt(Apd) ⋅ sqrt(Apd) ≈ Apd +end +end # of testset + +@testsection "eigen(::FourthOrderTensor)" begin +for T in (Float32, Float64), dim in (1, 2, 3) + Random.seed!(123) + # construct positive definite Voigt-tensor + n = dim*dim - div((dim-1)*dim, 2) + A = rand(T, n, n); A = A'A + I + Aval, Avec = eigen(Hermitian(A)) + perm = sortperm(Aval) + Aval = Aval[perm] + Avec = [Avec[:, i] for i in perm] + + S = frommandel(SymmetricTensor{4,dim,T}, A) + + E = eigen(S) + @test eigvals(E) ≈ Aval + S′ = zero(S) + for i in 1:n + m = tomandel(eigvecs(E)[i]) + @test m / m[1] ≈ Avec[i] / Avec[i][1] + @test S ⊡ E.vectors[i] ≈ E.values[i] * E.vectors[i] + @test norm(E.vectors[i]) ≈ 1 + for j in 1:n + if i == j + @test E.vectors[i] ⊡ E.vectors[j] ≈ 1 + else + @test E.vectors[i] ⊡ E.vectors[j] ≈ 0 atol=10eps(T) + end + end + S′ += E.values[i] * (E.vectors[i] ⊗ E.vectors[i]) + end + @test S ≈ S′ + a, b = E # iteration + @test a == eigvals(E) == E.values + @test b == eigvecs(E) == E.vectors +end +end + +# https://en.wikiversity.org/wiki/Continuum_mechanics/Tensor_algebra_identities +@testsection "tensor identities" begin +for T in (Float32, Float64, F64) + for dim in (1,2,3) + # Identities with second order and first order + A = rand(Tensor{2, dim, T}) + B = rand(Tensor{2, dim, T}) + C = rand(Tensor{2, dim, T}) + I = one(Tensor{2, dim, T}) + a = rand(Tensor{1, dim, T}) + b = rand(Tensor{1, dim, T}) + + @test A ⊡ B ≈ (A' ⋅ B) ⊡ one(A) + @test A ⊡ (a ⊗ b) ≈ (A ⋅ b) ⋅ a + @test (A ⋅ a) ⋅ (B ⋅ b) ≈ (A' ⋅ B) ⊡ (a ⊗ b) + @test (A ⋅ a) ⊗ b ≈ A ⋅ (a ⊗ b) + @test a ⊗ (A ⋅ b) ≈ (A ⋅ (b ⊗ a))' + @test a ⊗ (A ⋅ b) ≈ (a ⊗ b) ⋅ A' + + @test A ⊡ I ≈ tr(A) + @test det(A) ≈ det(A') + @test tr(inv(A) ⋅ A) ≈ dim + @test inv(A) ⋅ A ≈ A \ A ≈ I + @test inv(A) ⋅ a ≈ A \ 
a + + @test (I ⊗ I) ⊡ A ≈ tr(A) * I + @test (I ⊗ I) ⊡ A ⊡ A ≈ tr(A)^2 + + @test A ⋅ a ≈ a ⋅ A' + + A_sym = rand(SymmetricTensor{2, dim}) + B_sym = rand(SymmetricTensor{2, dim}) + C_sym = rand(SymmetricTensor{2, dim}) + I_sym = one(SymmetricTensor{2, dim}) + + @test A_sym ⊡ I_sym ≈ tr(A_sym) + @test det(A_sym) ≈ det(A_sym') + + @test (I_sym ⊗ I_sym) ⊡ A_sym ≈ tr(A_sym) * I_sym + @test ((I_sym ⊗ I_sym) ⊡ A_sym) ⊡ A_sym ≈ tr(A_sym)^2 + end +end + +for T in (Float32, Float64, F64) + for dim in (1,2,3) + # Identities with identity tensor + II = one(Tensor{4, dim, T}) + I = one(Tensor{2, dim, T}) + AA = rand(Tensor{4, dim, T}) + A = rand(Tensor{2, dim, T}) + II_sym = one(SymmetricTensor{4, dim, T}) + I_sym = one(SymmetricTensor{2, dim, T}) + AA_sym = rand(SymmetricTensor{4, dim, T}) + A_sym = rand(SymmetricTensor{2, dim, T}) + + @test II ⊡ AA ≈ AA + @test AA ⊡ II ≈ AA + @test II ⊡ A ≈ A + @test A ⊡ II ≈ A + @test II ⊡ A ⊡ A ≈ (tr(A' ⋅ A)) + + @test II ⊡ AA_sym ≈ AA_sym + @test AA_sym ⊡ II ≈ AA_sym + @test II ⊡ A_sym ≈ A_sym + @test A_sym ⊡ II ≈ A_sym + @test II ⊡ A_sym ⊡ A_sym ≈ (tr(A_sym' ⋅ A_sym)) + + @test II_sym ⊡ AA_sym ≈ AA_sym + @test AA_sym ⊡ II_sym ≈ AA_sym + @test II_sym ⊡ A_sym ≈ A_sym + @test A_sym ⊡ II_sym ≈ A_sym + @test II_sym ⊡ A_sym ⊡ A_sym ≈ (tr(A_sym' ⋅ A_sym)) + end +end + +for T in (Float64,) + for dim in 1:3 + # Tested operations + # S2 ⋅ S3, S3 ⋅ S2 + # S1 ⊗ S2, S2 ⊗ S1 + # S3 ⊡ S2, S2 ⊡ S3 + + # Identities involving 3rd order tensors + I2 = one(Tensor{2,dim,T}) + A2 = rand(Tensor{2,dim,T}) + S2 = rand(Tensor{2,dim,T}) + S2s = rand(SymmetricTensor{2,dim,T}) + S2sr = Tensor{2,dim,T}(S2s) + v = rand(Vec{dim,T}) + u = rand(Vec{dim,T}) + @test (u⊗I2)⋅v ≈ (u⊗v) + @test u⋅(I2⊗v) ≈ (u⊗v) + @test I2⋅(v⊗S2) ≈ (v⊗S2) + @test v ⊗ (S2⋅A2) ≈ (v ⊗ S2)⋅A2 + @test v ⊗ S2s ≈ v ⊗ S2sr + @test S2s ⊗ v ≈ S2sr ⊗ v + @test (v⊗S2)⊡A2 ≈ v*(S2⊡A2) + @test (v⊗S2)⊡S2s ≈ v*(S2⊡S2s) + @test A2⊡(S2⊗v) ≈ v*(S2⊡A2) + @test S2s⊡(S2⊗v) ≈ v*(S2⊡S2s) + end +end +end # of testset + 
+@testsection "promotion/conversion" begin +T = Float32 +WIDE_T = widen(T) +for dim in (1,2,3), order in (1,2,4) + + tens = Tensor{order, dim, T, dim^order} + tens_wide = Tensor{order, dim, WIDE_T, dim^order} + + @test promote_type(tens, tens) == tens + @test promote_type(tens_wide, tens) == tens_wide + @test promote_type(tens, tens_wide) == tens_wide + @test promote_type(tens_wide, tens_wide) == tens_wide + + At = rand(tens) + Bt = rand(tens_wide) + @test isa((@inferred At + Bt), tens_wide) + @test isa((@inferred convert(Tensor, At)), tens) + @test isa((@inferred convert(Tensor{order, dim, T}, At)), tens) + @test isa((@inferred convert(tens, At)), tens) + @test isa((@inferred convert(Tensor{order, dim, WIDE_T}, At)), tens_wide) + @test isa((@inferred convert(tens_wide, At)), tens_wide) + + if order != 1 + M = Tensors.n_components(SymmetricTensor{order, dim}) + sym = SymmetricTensor{order, dim, T, M} + sym_wide = SymmetricTensor{order, dim, WIDE_T, M} + + @test promote_type(sym, sym) == sym + @test promote_type(sym_wide, sym) == sym_wide + @test promote_type(sym, sym_wide) == sym_wide + @test promote_type(sym_wide, sym_wide) == sym_wide + + @test promote_type(sym, tens) == tens + @test promote_type(tens, sym) == tens + @test promote_type(sym_wide, tens) == tens_wide + @test promote_type(tens, sym_wide) == tens_wide + @test promote_type(sym, tens_wide) == tens_wide + @test promote_type(tens_wide, sym) == tens_wide + @test promote_type(sym_wide, tens_wide) == tens_wide + + As = rand(sym) + Bs = rand(sym_wide) + @test isa((@inferred As + Bs), sym_wide) + + @test isa((@inferred convert(SymmetricTensor, As)), sym) + @test isa((@inferred convert(SymmetricTensor{order, dim, T}, As)), sym) + @test isa((@inferred convert(sym, As)), sym) + @test isa((@inferred convert(SymmetricTensor{order, dim, WIDE_T}, As)), sym_wide) + @test isa((@inferred convert(sym_wide, As)), sym_wide) + + # SymmetricTensor -> Tensor + @test (@inferred convert(Tensor, As))::tens ≈ As ≈ Array(As) + 
@test (@inferred convert(Tensor{order, dim}, As))::tens ≈ As ≈ Array(As) + @test (@inferred convert(Tensor{order, dim, WIDE_T}, As))::tens_wide ≈ As ≈ Array(As) + @test (@inferred convert(tens_wide, As))::tens_wide ≈ As ≈ Array(As) + + # Tensor -> SymmetricTensor + if dim != 1 + @test_throws InexactError convert(SymmetricTensor, At) + @test_throws InexactError convert(SymmetricTensor{order, dim}, At) + @test_throws InexactError convert(SymmetricTensor{order, dim, WIDE_T}, At) + @test_throws InexactError convert(typeof(Bs), At) + end + end +end +end # of testset + +using Unitful +@testset "unitful" begin; for dim in 1:3 + A = rand(Tensor{2,dim}) + As = rand(SymmetricTensor{2,dim}) + @test one(A * 1u"Pa")::Tensor{2,dim,Float64} == one(A)::Tensor{2,dim,Float64} + @test one(As * 1u"Pa")::SymmetricTensor{2,dim,Float64} == one(As)::SymmetricTensor{2,dim,Float64} + + # Eigen of 2nd order + M = rand(dim, dim) + 5I; M = M'M + S = SymmetricTensor{2,dim}(M) + SU = SymmetricTensor{2,dim}(M * 1u"Pa") + ST = convert(Tensor, S) + SUT = convert(Tensor, SU) + @test eigvals(Hermitian(M)) ≈ eigvals(S) ≈ ustrip.(eigvals(SU)) + same_no_sign(args...) 
= all(args) do arg; all(1:dim) do i + args[1][:, i] ≈ arg[:, i] || args[1][:, i] ≈ -arg[:, i] + end end + @test same_no_sign(eigvecs(Hermitian(M)), eigvecs(S), ustrip.(eigvecs(SU))) + @test eltype(eigvals(SU)) == typeof(1.0u"Pa") + @test eltype(eigvecs(SU)) == typeof(one(1.0u"Pa")) + # inv + @test inv(M) ≈ inv(S) ≈ inv(ST) ≈ + ustrip.(inv(SU)) ≈ ustrip.(inv(SUT)) + # (to|from)(voigt|mandel) + @test fromvoigt(SymmetricTensor{2,dim}, tovoigt(SU)) == SU + @test fromvoigt(SymmetricTensor{2,dim}, tovoigt(SU; offdiagscale=5.0); offdiagscale=5.0) ≈ SU + @test frommandel(SymmetricTensor{2,dim}, tomandel(SU)) ≈ SU + @test fromvoigt(Tensor{2,dim}, tovoigt(SUT)) == SUT + # Eigen of 4th order + M = rand(dim+dim, dim+dim) + 5I; M = M'M + S = fromvoigt(SymmetricTensor{4,dim}, M) + SU = fromvoigt(SymmetricTensor{4,dim}, M * 1u"Pa") + @test eigvals(S) ≈ ustrip.(eigvals(SU)) + @test eigvecs(S) ≈ eigvecs(SU) + @test eltype(eigvals(SU)) == typeof(1.0u"Pa") + @test eltype(eigvecs(SU)[1]) == typeof(one(1.0u"Pa")) + # inv + @test inv(S) ⊡ S ≈ inv(SU) ⊡ SU ≈ one(S) + M = rand(dim^2, dim^2) + 5I; M = M'M + ST = fromvoigt(Tensor{4,dim}, M) + SUT = fromvoigt(Tensor{4,dim}, M * 1u"Pa") + @test inv(ST) ⊡ ST ≈ inv(SUT) ⊡ SUT ≈ one(ST) + # (to|from)(voigt|mandel) + @test fromvoigt(SymmetricTensor{4,dim}, tovoigt(SU)) == SU + @test fromvoigt(SymmetricTensor{4,dim}, tovoigt(SU; offdiagscale=5.0); offdiagscale=5.0) ≈ SU + @test frommandel(SymmetricTensor{4,dim}, tomandel(SU)) ≈ SU + @test fromvoigt(Tensor{4,dim}, tovoigt(SUT)) == SUT +end end + +@testset "issues #100, #101: orders of eigenvectors" begin + for (i, j) in ((1, 2), (2, 1)) + S = SymmetricTensor{2,2,Float64}((i, 0, j)) + Λ = diagm(SymmetricTensor{2,2}, eigvals(S)) + Φ = eigvecs(S) + @test Φ ⋅ Λ ⋅ Φ' ≈ S + end + for (i, j, k) in ((1, 2, 3), (1, 3, 2), (2, 1, 3), (2, 3, 1), (3, 1, 2), (3, 2, 1)) + S = SymmetricTensor{2,3,Float64}((i, 0, 0, j, 0, k)) + Λ = diagm(SymmetricTensor{2,3}, eigvals(S)) + Φ = eigvecs(S) + @test Φ ⋅ Λ ⋅ Φ' ≈ S + 
end +end + +@testset "issue #166: NaN in eigenvectors" begin + M = [ 50.0 -10.0 0.0 + -10.0 40.0 0.0 + 0.0 0.0 0.0] + ME = eigen(Hermitian(M)) + S = SymmetricTensor{2,3}(M) + SE = eigen(S) + @test eigvals(SE) ≈ eigvals(ME) + @test eigvecs(SE) ≈ eigvecs(ME) +end + +@testsection "exceptions" begin + # normal multiplication + A = rand(Tensor{2, 3}) + B = rand(Tensor{2, 3}) + @test_throws Exception A*B + @test_throws Exception A'*B + + AA = rand(Tensor{4, 2}) + A2 = rand(Tensor{2, 2}) + @test_throws DimensionMismatch A + A2 + @test_throws DimensionMismatch AA - A + + # transpose/adjoint of Vec + x = rand(Vec{3}) + @test_throws ArgumentError x' + @test_throws ArgumentError transpose(x) + @test_throws ArgumentError adjoint(x) +end # of testset diff --git a/test/test_ops.jl b/test/test_ops.jl index f842139b..048db527 100644 --- a/test/test_ops.jl +++ b/test/test_ops.jl @@ -1,357 +1,357 @@ -function _permutedims(S::FourthOrderTensor{dim}, idx::NTuple{4,Int}) where dim - sort([idx...]) == [1,2,3,4] || throw(ArgumentError("Missing index.")) - neworder = sortperm([idx...]) - f = (i,j,k,l) -> S[[i,j,k,l][neworder]...] 
- return Tensor{4,dim}(f) -end - -@testsection "tensor ops" begin -for T in (Float32, Float64, F64), dim in (1,2,3) -println("T = $T, dim = $dim") -AA = rand(Tensor{4, dim, T}) -BB = rand(Tensor{4, dim, T}) -A3 = rand(Tensor{3, dim, T}) -B3 = rand(Tensor{3, dim, T}) -A = rand(Tensor{2, dim, T}) -B = rand(Tensor{2, dim, T}) -a = rand(Tensor{1, dim, T}) -b = rand(Tensor{1, dim, T}) - -AA_sym = rand(SymmetricTensor{4, dim, T}) -BB_sym = rand(SymmetricTensor{4, dim, T}) -A_sym = rand(SymmetricTensor{2, dim, T}) -B_sym = rand(SymmetricTensor{2, dim, T}) -symA = symmetric(A) -symB = symmetric(B) - -i,j,k,l = rand(1:dim,4) - -@testsection "double contraction" begin - # 4 - 4 - @test vec((@inferred dcontract(AA, BB))::Tensor{4, dim, T}) ≈ vec(collect(reshape(vec(AA), (dim^2, dim^2))) * collect(reshape(vec(BB), (dim^2, dim^2)))) - @test vec((@inferred dcontract(AA_sym, BB))::Tensor{4, dim, T}) ≈ vec(collect(reshape(vec(AA_sym), (dim^2, dim^2))) * collect(reshape(vec(BB), (dim^2, dim^2)))) - @test vec((@inferred dcontract(AA, BB_sym))::Tensor{4, dim, T}) ≈ vec(collect(reshape(vec(AA), (dim^2, dim^2))) * collect(reshape(vec(BB_sym), (dim^2, dim^2)))) - @test vec((@inferred dcontract(AA_sym, BB_sym))::SymmetricTensor{4, dim, T}) ≈ vec(collect(reshape(vec(AA_sym), (dim^2, dim^2))) * collect(reshape(vec(BB_sym), (dim^2, dim^2)))) - @test dcontract(convert(Tensor, AA_sym), convert(Tensor, BB_sym)) ≈ dcontract(AA_sym, BB_sym) - - # 3 - 4 - @test dcontract(AA, A3) ≈ Tensor{3,dim}((i,j,k) -> sum(AA[i,j,m,n]*A3[m,n,k] for m in 1:dim, n in 1:dim)) - @test dcontract(A3, AA) ≈ Tensor{3,dim}((i,j,k) -> sum(A3[i,m,n]*AA[m,n,j,k] for m in 1:dim, n in 1:dim)) - - # 2 - 4 - @test (@inferred dcontract(AA, A))::Tensor{2, dim, T} ≈ reshape(collect(reshape(vec(AA), (dim^2, dim^2))) * collect(reshape(vec(A), (dim^2,))), dim, dim) - @test (@inferred dcontract(AA_sym, A))::SymmetricTensor{2, dim, T} ≈ reshape(collect(reshape(vec(AA_sym), (dim^2, dim^2))) * collect(reshape(vec(A), (dim^2,))), dim, 
dim) - @test (@inferred dcontract(AA, A_sym))::Tensor{2, dim, T} ≈ reshape(collect(reshape(vec(AA), (dim^2, dim^2))) * collect(reshape(vec(A_sym), (dim^2,))), dim, dim) - @test (@inferred dcontract(AA_sym, A_sym))::SymmetricTensor{2, dim, T} ≈ reshape(collect(reshape(vec(AA_sym), (dim^2, dim^2))) * collect(reshape(vec(A_sym), (dim^2,))), dim, dim) - @test (@inferred dcontract(A, AA))::Tensor{2, dim, T} ≈ reshape(collect(reshape(vec(AA), (dim^2, dim^2))') * collect(reshape(vec(A), (dim^2,))), dim, dim) - @test (@inferred dcontract(A_sym, AA))::Tensor{2, dim, T} ≈ reshape(collect(reshape(vec(AA), (dim^2, dim^2))') * collect(reshape(vec(A_sym), (dim^2,))), dim, dim) - @test (@inferred dcontract(A, AA_sym))::SymmetricTensor{2, dim, T} ≈ reshape(collect(reshape(vec(AA_sym), (dim^2, dim^2))') * collect(reshape(vec(A), (dim^2,))), dim, dim) - @test (@inferred dcontract(A_sym, AA_sym))::SymmetricTensor{2, dim, T} ≈ reshape(collect(reshape(vec(AA_sym), (dim^2, dim^2))') * collect(reshape(vec(A_sym), (dim^2,))), dim, dim) - @test dcontract(convert(Tensor, AA_sym), convert(Tensor, A_sym)) ≈ dcontract(AA_sym, A_sym) - - # 2 - 3 - @test dcontract(A, A3) ≈ Tensor{1,dim}((i) -> sum(A[m,n]*A3[m,n,i] for m in 1:dim, n in 1:dim)) - @test dcontract(A3, A) ≈ Tensor{1,dim}((i) -> sum(A3[i,m,n]*A[m,n] for m in 1:dim, n in 1:dim)) - - # 2 - 2 - @test (@inferred dcontract(A, B))::T ≈ sum(vec(A) .* vec(B)) - @test (@inferred dcontract(A_sym, B))::T ≈ sum(vec(A_sym) .* vec(B)) - @test (@inferred dcontract(A, B_sym))::T ≈ sum(vec(A) .* vec(B_sym)) - @test (@inferred dcontract(A_sym, B_sym))::T ≈ sum(vec(A_sym) .* vec(B_sym)) -end # of testsection - -@testsection "outer products (otimes, otimesu, otimesl)" begin - # binary otimes - @test (@inferred otimes(a, b))::Tensor{2, dim, T} ≈ Array(a) * Array(b)' - @test reshape(vec((@inferred otimes(A, B))::Tensor{4, dim, T}), dim^2, dim^2) ≈ vec(A) * vec(B)' - @test reshape(vec((@inferred otimes(A_sym, B))::Tensor{4, dim, T}), dim^2, dim^2) ≈ 
vec(A_sym) * vec(B)' - @test reshape(vec((@inferred otimes(A, B_sym))::Tensor{4, dim, T}), dim^2, dim^2) ≈ vec(A) * vec(B_sym)' - @test reshape(vec((@inferred otimes(A_sym, B_sym))::SymmetricTensor{4, dim, T}), dim^2, dim^2) ≈ vec(A_sym) * vec(B_sym)' - # unary otimes - @test (@inferred otimes(a))::SymmetricTensor{2, dim, T} ≈ otimes(a, a) - - # binary otimesu - @test (@inferred otimesu(A, B))::Tensor{4, dim, T} ≈ _permutedims(otimes(A, B), (1,3,2,4)) - @test (@inferred otimesu(A_sym, B))::Tensor{4, dim, T} ≈ _permutedims(otimes(A_sym, B), (1,3,2,4)) - @test (@inferred otimesu(A, B_sym))::Tensor{4, dim, T} ≈ _permutedims(otimes(A, B_sym), (1,3,2,4)) - @test (@inferred otimesu(A_sym, B_sym))::Tensor{4, dim, T} ≈ _permutedims(otimes(A_sym, B_sym), (1,3,2,4)) - - # binary otimesl - @test (@inferred otimesl(A, B))::Tensor{4, dim, T} ≈ _permutedims(otimes(A, B), (1,3,4,2)) - @test (@inferred otimesl(A_sym, B))::Tensor{4, dim, T} ≈ _permutedims(otimes(A_sym, B), (1,3,4,2)) - @test (@inferred otimesl(A, B_sym))::Tensor{4, dim, T} ≈ _permutedims(otimes(A, B_sym), (1,3,4,2)) - @test (@inferred otimesl(A_sym, B_sym))::Tensor{4, dim, T} ≈ _permutedims(otimes(A_sym, B_sym), (1,3,4,2)) -end # of testsection - -@testsection "dot products" begin - # 1 - 1 - @test (@inferred dot(a, b))::T ≈ sum(Array(a) .* Array(b)) - # 1 - 2 - @test (@inferred dot(A, b))::Vec{dim, T} ≈ Array(A) * Array(b) - @test (@inferred dot(A_sym, b))::Vec{dim, T} ≈ Array(A_sym) * Array(b) - @test (@inferred dot(a, B))::Vec{dim, T} ≈ Array(B)' * Array(a) - @test (@inferred dot(a, B_sym))::Vec{dim, T} ≈ Array(B_sym)' * Array(a) - # 2 - 2 - # binary - @test (@inferred dot(A, B))::Tensor{2, dim, T} ≈ Array(A) * Array(B) - @test (@inferred dot(A_sym, B))::Tensor{2, dim, T} ≈ Array(A_sym) * Array(B) - @test (@inferred dot(A, B_sym))::Tensor{2, dim, T} ≈ Array(A) * Array(B_sym) - @test (@inferred dot(A_sym, B_sym))::Tensor{2, dim, T} ≈ Array(A_sym) * Array(B_sym) - # unary - @test (@inferred 
dot(A_sym))::SymmetricTensor{2, dim, T} ≈ dot(A_sym, A_sym) - @test (@inferred tdot(A))::SymmetricTensor{2, dim, T} ≈ dot(transpose(A), A) - @test (@inferred tdot(A_sym))::SymmetricTensor{2, dim, T} ≈ dot(transpose(A_sym), A_sym) - @test (@inferred dott(A))::SymmetricTensor{2, dim, T} ≈ dot(A, transpose(A)) - @test (@inferred dott(A_sym))::SymmetricTensor{2, dim, T} ≈ dot(A_sym, transpose(A_sym)) - # 2 - 4 - @test (@inferred dot(AA, B))::Tensor{4, dim, T} ≈ reshape(collect(reshape(vec(AA), (dim^3, dim))) * collect(reshape(vec(B), (dim, dim))), (dim, dim, dim, dim)) - @test (@inferred dot(B, AA))::Tensor{4, dim, T} ≈ reshape(collect(reshape(vec(B), (dim, dim))) * collect(reshape(vec(AA), (dim, dim^3))), (dim, dim, dim, dim)) - @test (@inferred dot(AA_sym, B))::Tensor{4, dim, T} ≈ reshape(collect(reshape(vec(AA_sym), (dim^3, dim))) * collect(reshape(vec(B), (dim, dim))), (dim, dim, dim, dim)) - @test (@inferred dot(B, AA_sym))::Tensor{4, dim, T} ≈ reshape(collect(reshape(vec(B), (dim, dim))) * collect(reshape(vec(AA_sym), (dim, dim^3))), (dim, dim, dim, dim)) - @test (@inferred dot(AA, B_sym))::Tensor{4, dim, T} ≈ reshape(collect(reshape(vec(AA), (dim^3, dim))) * collect(reshape(vec(B_sym), (dim, dim))), (dim, dim, dim, dim)) - @test (@inferred dot(B_sym, AA))::Tensor{4, dim, T} ≈ reshape(collect(reshape(vec(B_sym), (dim, dim))) * collect(reshape(vec(AA), (dim, dim^3))), (dim, dim, dim, dim)) - @test (@inferred dot(AA_sym, B_sym))::Tensor{4, dim, T} ≈ reshape(collect(reshape(vec(AA_sym), (dim^3, dim))) * collect(reshape(vec(B_sym), (dim, dim))), (dim, dim, dim, dim)) - @test (@inferred dot(B_sym, AA_sym))::Tensor{4, dim, T} ≈ reshape(collect(reshape(vec(B_sym), (dim, dim))) * collect(reshape(vec(AA_sym), (dim, dim^3))), (dim, dim, dim, dim)) -end # of testsection - -@testsection "symmetric/skew-symmetric" begin - if dim == 1 # non-symmetric tensors are symmetric - @test (@inferred issymmetric(A)) - @test (@inferred issymmetric(AA)) - elseif dim != 1 - @test 
!(@inferred issymmetric(A)) - @test !(@inferred issymmetric(AA)) - @test !(@inferred ismajorsymmetric(AA)) - @test !(@inferred isminorsymmetric(AA)) - @test_throws InexactError convert(typeof(A_sym),A) - @test_throws InexactError convert(typeof(AA_sym),AA) - end - @test (@inferred issymmetric(A_sym)) - @test (@inferred issymmetric(AA_sym)) - @test (@inferred isminorsymmetric(AA_sym)) - @test (@inferred issymmetric(symmetric(A))) - @test (@inferred issymmetric(A + A')) - - @test (@inferred symmetric(A))::SymmetricTensor{2, dim, T} ≈ 0.5(A + A') - @test (@inferred symmetric(A_sym))::SymmetricTensor{2, dim, T} ≈ A_sym - @test convert(typeof(A_sym),convert(Tensor,symmetric(A))) ≈ symmetric(A) - - @test (@inferred symmetric(AA))::SymmetricTensor{4, dim, T} ≈ (@inferred minorsymmetric(AA))::SymmetricTensor{4, dim, T} - @test minorsymmetric(AA)[i,j,k,l] ≈ minorsymmetric(AA)[j,i,l,k] - @test issymmetric(convert(Tensor,minorsymmetric(AA))) - @test isminorsymmetric(convert(Tensor,minorsymmetric(AA))) - @test (@inferred symmetric(AA_sym))::SymmetricTensor{4, dim, T} ≈ (@inferred minorsymmetric(AA_sym))::SymmetricTensor{4, dim, T} - @test minorsymmetric(AA_sym)[i,j,k,l] ≈ minorsymmetric(AA_sym)[j,i,l,k] - @test minorsymmetric(AA_sym) ≈ AA_sym - @test issymmetric(convert(Tensor,minorsymmetric(AA_sym))) - @test isminorsymmetric(convert(Tensor,minorsymmetric(AA_sym))) - @test minorsymmetric(A⊗B) ≈ symA⊗symB - - @test convert(typeof(AA_sym),convert(Tensor,minorsymmetric(AA))) ≈ minorsymmetric(AA) - - @test ((@inferred majorsymmetric(AA))::Tensor{4, dim, T})[i,j,k,l] ≈ 0.5*(AA[i,j,k,l] + AA[k,l,i,j]) - @test majorsymmetric(AA)[i,j,k,l] ≈ majorsymmetric(AA)[k,l,i,j] - @test ismajorsymmetric(majorsymmetric(AA)) - @test ((@inferred majorsymmetric(AA_sym))::SymmetricTensor{4, dim, T})[i,j,k,l] ≈ 0.5*(AA_sym[i,j,k,l] + AA_sym[k,l,i,j]) - @test majorsymmetric(AA_sym)[i,j,k,l] ≈ majorsymmetric(AA_sym)[k,l,i,j] - @test ismajorsymmetric(majorsymmetric(AA_sym)) - - @test (@inferred 
skew(A))::Tensor{2, dim, T} ≈ 0.5(A - A') - @test (@inferred skew(A_sym))::Tensor{2, dim, T} ≈ zero(A_sym) - - # Identities - @test A ≈ symmetric(A) + skew(A) - @test skew(A) ≈ -skew(A)' - @test tr(skew(A)) ≈ 0.0 - @test tr(symmetric(A)) ≈ tr(A) -end # of testsection - -@testsection "transpose" begin - @test (@inferred transpose(A))::Tensor{2, dim, T} ≈ Array(A)' - @test transpose(transpose(A)) ≈ A - @test (@inferred transpose(A_sym))::SymmetricTensor{2, dim, T} ≈ A_sym ≈ Array(A_sym)' - @test transpose(transpose(A_sym)) ≈ A_sym - - @test (@inferred transpose(AA))::Tensor{4, dim, T} ≈ (@inferred minortranspose(AA))::Tensor{4, dim, T} - @test AA[i,j,k,l] ≈ minortranspose(AA)[j,i,l,k] - @test AA_sym[i,j,k,l] ≈ minortranspose(AA_sym)[j,i,l,k] - @test (@inferred transpose(AA_sym))::SymmetricTensor{4, dim, T} ≈ (@inferred minortranspose(AA_sym))::SymmetricTensor{4, dim, T} ≈ AA_sym - @test minortranspose(minortranspose(AA)) ≈ AA - @test minortranspose(minortranspose(AA_sym)) ≈ AA_sym - - @test majortranspose((@inferred majortranspose(AA))::Tensor{4, dim, T}) ≈ AA - @test majortranspose((@inferred majortranspose(AA_sym))::Tensor{4, dim, T}) ≈ AA_sym - @test AA[i,j,k,l] ≈ majortranspose(AA)[k,l,i,j] - @test AA_sym[i,j,k,l] ≈ majortranspose(AA_sym)[k,l,i,j] - - @test minortranspose(AA) ≈ _permutedims(AA,(2,1,4,3)) - @test minortranspose(AA_sym) ≈ _permutedims(AA_sym,(2,1,4,3)) - @test majortranspose(AA) ≈ _permutedims(AA,(3,4,1,2)) - @test majortranspose(AA_sym) ≈ _permutedims(AA_sym,(3,4,1,2)) -end # of testsection - -@testsection "cross product" begin - @test (@inferred a × a)::Vec{3, T} ≈ Vec{3, T}((0.0,0.0,0.0)) - @test a × b ≈ -b × a - if dim == 2 - ad = Vec{2, T}((1.0,0.0)) - ad2 = Vec{2, T}((0.0,1.0)) - @test (@inferred ad × ad2)::Vec{3, T} ≈ Vec{3, T}((0.0, 0.0, 1.0)) - end - if dim == 3 - ad = Vec{3, T}((1.0,0.0,0.0)) - ad2 = Vec{3, T}((0.0,1.0,0.0)) - @test (@inferred ad × ad2)::Vec{3, T} ≈ Vec{3, T}((0.0, 0.0, 1.0)) - end - if T == Float64 # mixed eltype - @test 
rand(Vec{dim,Float64}) × rand(Vec{dim,Float32}) isa Vec{3,Float64} - end -end # of testsection - -@testsection "special" begin - AAT = Tensor{4, dim, T}((i,j,k,l) -> AA_sym[i,l,k,j]) - @test AAT ⊡ (b ⊗ a) ≈ (@inferred dotdot(a, AA_sym, b))::Tensor{2, dim, T} -end # of testsection - -@testsection "rotation" begin - if dim == 3 - x = eᵢ(Vec{3, T}, 1) - y = eᵢ(Vec{3, T}, 2) - z = eᵢ(Vec{3, T}, 3) - - @test (@inferred rotate(z, z, rand(T)))::Vec{3, T} ≈ z - @test rotate(2*z, y, π/2) ≈ 2*x - @test rotate(3*z, y, π) ≈ -3*z - @test rotate(x+y+z, z, π/4) ≈ Vec{3}((0.0,√2,1.0)) - - @test rotate(a, b, 0) ≈ a ≈ rotation_tensor(b, 0) ⋅ a - @test rotate(a, b, π) ≈ rotate(a, b, -π) - @test rotate(a, b, π) ≈ rotation_tensor(b, π) ⋅ a rtol=1e-6 - @test rotate(a, b, π/2) ≈ rotate(a, -b, -π/2) - @test rotate(a, b, π/2) ≈ rotation_tensor(b, π/2) ⋅ a rtol=1e-6 - - @test rotate(A, x, π) ≈ rotate(A, x, -π) - @test rotate(rotate(rotate(A, x, π), y, π), z, π) ≈ A - @test rotate(A, a, 0) ≈ A - @test rotate(A, a, π/2) ≈ rotate(A, -a, -π/2) - A_sym_T = convert(Tensor{2,3}, A_sym) - @test rotate(A_sym, x, π)::SymmetricTensor ≈ rotate(A_sym, x, -π)::SymmetricTensor ≈ rotate(A_sym_T, x, -π) - @test rotate(rotate(rotate(A_sym, x, π), y, π), z, π)::SymmetricTensor ≈ A_sym - @test rotate(A_sym, a, 0) ≈ A_sym - @test rotate(A_sym, a, π/2) ≈ rotate(A_sym, -a, -π/2) - @test rotate(A_sym, π/4, π/4, π/4) ≈ rotate(A_sym, -3π/4, 3π/4, -3π/4) - @test rotate(A, π/4, π/4, π/4) ≈ rotate(A, -3π/4, 3π/4, -3π/4) - @test rotate(one(A), π/4, π/4, π/4) ≈ rotate(one(A), -3π/4, 3π/4, -3π/4) ≈ one(A) - - @test rotate(AA, x, π)::Tensor ≈ rotate(AA, x, -π) - @test rotate(rotate(rotate(AA, x, π), y, π), z, π) ≈ AA - @test rotate(AA, a, 0) ≈ AA - @test rotate(AA, a, π/2) ≈ rotate(AA, -a, -π/2) - AA_sym_T = convert(Tensor{4,3}, AA_sym) - @test rotate(AA_sym, x, π)::SymmetricTensor ≈ rotate(AA_sym, x, -π) ≈ rotate(AA_sym_T, x, π) - @test rotate(rotate(rotate(AA_sym, x, π), y, π), z, π) ≈ AA_sym - @test rotate(AA_sym, a, 0) 
≈ AA_sym - @test rotate(AA_sym, a, π/2) ≈ rotate(AA_sym, -a, -π/2) - - v1, v2, v3, v4, axis = [rand(Vec{3,T}) for _ in 1:5] - α = rand(T) * π - R = rotation_tensor(axis, α) - v1v2v3v4 = (v1 ⊗ v2) ⊗ (v3 ⊗ v4) - Rv1v2v3v4 = ((R ⋅ v1) ⊗ (R ⋅ v2)) ⊗ ((R ⋅ v3) ⊗ (R ⋅ v4)) - @test rotate(v1v2v3v4, axis, α) ≈ Rv1v2v3v4 - v1v1v2v2 = otimes(v1) ⊗ otimes(v2) - Rv1v1v2v2 = otimes(R ⋅ v1) ⊗ otimes(R ⋅ v2) - @test rotate(v1v1v2v2, axis, α) ≈ Rv1v1v2v2 - end - if dim == 2 - θ = T(pi/2) - zT = zero(T) - z = Vec{3}((zT, zT, one(T))) - r = 1:2 - R = rotation_tensor(θ) - R3 = rotation_tensor(z, θ) - @test R ≈ R3[r, r] - a3 = Vec{3}((a[1], a[2], zT)) - @test rotate(a, θ) ≈ R ⋅ a ≈ rotate(a3, z, θ)[r] - A3 = Tensor{2,3}((i,j) -> i < 3 && j < 3 ? A[i, j] : zT) - @test rotate(A, θ) ≈ rotate(A3, z, θ)[r, r] - A3s = SymmetricTensor{2,3}((i,j) -> i < 3 && j < 3 ? A_sym[i, j] : zT) - @test rotate(A_sym, θ) ≈ rotate(A3s, z, θ)[r, r] - AA3 = Tensor{4,3}((i,j,k,l) -> i < 3 && j < 3 && k < 3 && l < 3 ? AA[i, j, k, l] : zT) - @test rotate(AA, θ) ≈ rotate(AA3, z, θ)[r, r, r, r] - AA3s = SymmetricTensor{4,3}((i,j,k,l) -> i < 3 && j < 3 && k < 3 && l < 3 ? AA_sym[i, j, k, l] : zT) - @test rotate(AA_sym, θ) ≈ rotate(AA3s, z, θ)[r, r, r, r] - end -end - -@testsection "tovoigt/fromvoigt" begin - # https://classes.engineering.wustl.edu/2009/spring/mase5513/abaqus/docs/v6.6/books/usb/default.htm?startat=pt01ch01s02aus02.html - abaqus = dim == 1 ? fill(1, 1, 1) : dim == 2 ? 
[1 3; 0 2] : [1 4 6; 0 2 5; 0 0 3] - T2 = T(2) - @test (@inferred tovoigt(AA)) * (@inferred tovoigt(A)) ≈ tovoigt(AA ⊡ A) - @test (@inferred tovoigt(AA_sym)) * (@inferred tovoigt(A_sym, offdiagscale=T2)) ≈ tovoigt(AA_sym ⊡ A_sym) - - nc(x::SecondOrderTensor) = length(x.data) - nc(x::FourthOrderTensor) = Int(sqrt(length(x.data))) - x = zeros(T, nc(A) + 1); tovoigt!(x, A; offset=1) - @test x[2:end] == tovoigt(A) - x = zeros(T, nc(A_sym) + 1); tovoigt!(x, A_sym; offset=1) - @test x[2:end] == tovoigt(A_sym) - x = zeros(T, nc(AA) + 1, nc(AA) + 1); tovoigt!(x, AA; offset_i=1, offset_j=1) - @test x[2:end, 2:end] == tovoigt(AA) - x = zeros(T, nc(AA_sym) + 1, nc(AA_sym) + 1); tovoigt!(x, AA_sym; offset_i=1, offset_j=1) - @test x[2:end, 2:end] == tovoigt(AA_sym) - - x = zeros(T, nc(A) + 1); tomandel!(x, A; offset=1) - @test x[2:end] == tomandel(A) - x = zeros(T, nc(A_sym) + 1); tomandel!(x, A_sym; offset=1) - @test x[2:end] == tomandel(A_sym) - x = zeros(T, nc(AA) + 1, nc(AA) + 1); tomandel!(x, AA; offset_i=1, offset_j=1) - @test x[2:end, 2:end] == tomandel(AA) - x = zeros(T, nc(AA_sym) + 1, nc(AA_sym) + 1); tomandel!(x, AA_sym; offset_i=1, offset_j=1) - @test x[2:end, 2:end] == tomandel(AA_sym) - - @test (@inferred fromvoigt(Tensor{2,dim}, tovoigt(A))) ≈ A - @test (@inferred fromvoigt(Tensor{4,dim}, tovoigt(AA))) ≈ AA - @test (@inferred fromvoigt(SymmetricTensor{2,dim}, tovoigt(A_sym, offdiagscale=T2), offdiagscale=T2)) ≈ A_sym - @test (@inferred fromvoigt(SymmetricTensor{4,dim}, tovoigt(AA_sym, offdiagscale=T2), offdiagscale=T2)) ≈ AA_sym - - @test (@inferred fromvoigt(SymmetricTensor{2,dim}, tovoigt(A_sym; order=abaqus); order=abaqus)) ≈ A_sym - @test (@inferred fromvoigt(SymmetricTensor{4,dim}, tovoigt(AA_sym; order=abaqus); order=abaqus)) ≈ AA_sym - - @test (@inferred tomandel(AA_sym)) * (@inferred tomandel(A_sym)) ≈ tomandel(AA_sym ⊡ A_sym) - @test (@inferred frommandel(SymmetricTensor{2,dim}, tomandel(A_sym))) ≈ A_sym - @test (@inferred 
frommandel(SymmetricTensor{4,dim}, tomandel(AA_sym))) ≈ AA_sym - @test (@inferred tomandel(AA)) * (@inferred tomandel(A)) ≈ tomandel(AA ⊡ A) - @test (@inferred frommandel(Tensor{2,dim}, tomandel(A))) ≈ A - @test (@inferred frommandel(Tensor{4,dim}, tomandel(AA))) ≈ AA - - if T==Float64 - # Check that Boolean values are supported for fromvoigt - num_components = Int((dim^2+dim)/2) - @test isa(fromvoigt(SymmetricTensor{2,dim}, rand(num_components) .> 0.5), SymmetricTensor{2,dim,Bool}) - @test isa(fromvoigt(SymmetricTensor{4,dim}, rand(num_components,num_components) .> 0.5), SymmetricTensor{4,dim,Bool}) - - # Check that different types in the vector and tensor is supported via conversion - @test tovoigt!(zeros(Float32, nc(A)), A)::Vector{Float32} ≈ tovoigt(A) - @test tovoigt!(zeros(Float32, nc(A_sym)), A_sym)::Vector{Float32} ≈ tovoigt(A_sym) - @test tovoigt!(zeros(Float32, nc(AA), nc(AA)), AA)::Matrix{Float32} ≈ tovoigt(AA) - @test tovoigt!(zeros(Float32, nc(AA_sym), nc(AA_sym)), AA_sym)::Matrix{Float32} ≈ tovoigt(AA_sym) - end - - # Static voigt/mandel that use default order - s2 = [rand(SymmetricTensor{2,dim}) for dim in 1:3] - s4 = [rand(SymmetricTensor{4,dim}) for dim in 1:3] - t2 = [rand(Tensor{2,dim}) for dim in 1:3] - t4 = [rand(Tensor{4,dim}) for dim in 1:3] - for a in (s2, s4, t2, t4) - for b in a - @test tovoigt(SArray, b) == tovoigt(b) - @test tomandel(SArray, b) ≈ tomandel(b) - @test fromvoigt(Tensors.get_base(typeof(b)), tovoigt(SArray, b)) ≈ b - @test frommandel(Tensors.get_base(typeof(b)), tomandel(SArray, b)) ≈ b - end - end -end -end # of testsection -end # of testsection +function _permutedims(S::FourthOrderTensor{dim}, idx::NTuple{4,Int}) where dim + sort([idx...]) == [1,2,3,4] || throw(ArgumentError("Missing index.")) + neworder = sortperm([idx...]) + f = (i,j,k,l) -> S[[i,j,k,l][neworder]...] 
+ return Tensor{4,dim}(f) +end + +@testsection "tensor ops" begin +for T in (Float32, Float64, F64), dim in (1,2,3) +println("T = $T, dim = $dim") +AA = rand(Tensor{4, dim, T}) +BB = rand(Tensor{4, dim, T}) +A3 = rand(Tensor{3, dim, T}) +B3 = rand(Tensor{3, dim, T}) +A = rand(Tensor{2, dim, T}) +B = rand(Tensor{2, dim, T}) +a = rand(Tensor{1, dim, T}) +b = rand(Tensor{1, dim, T}) + +AA_sym = rand(SymmetricTensor{4, dim, T}) +BB_sym = rand(SymmetricTensor{4, dim, T}) +A_sym = rand(SymmetricTensor{2, dim, T}) +B_sym = rand(SymmetricTensor{2, dim, T}) +symA = symmetric(A) +symB = symmetric(B) + +i,j,k,l = rand(1:dim,4) + +@testsection "double contraction" begin + # 4 - 4 + @test vec((@inferred dcontract(AA, BB))::Tensor{4, dim, T}) ≈ vec(collect(reshape(vec(AA), (dim^2, dim^2))) * collect(reshape(vec(BB), (dim^2, dim^2)))) + @test vec((@inferred dcontract(AA_sym, BB))::Tensor{4, dim, T}) ≈ vec(collect(reshape(vec(AA_sym), (dim^2, dim^2))) * collect(reshape(vec(BB), (dim^2, dim^2)))) + @test vec((@inferred dcontract(AA, BB_sym))::Tensor{4, dim, T}) ≈ vec(collect(reshape(vec(AA), (dim^2, dim^2))) * collect(reshape(vec(BB_sym), (dim^2, dim^2)))) + @test vec((@inferred dcontract(AA_sym, BB_sym))::SymmetricTensor{4, dim, T}) ≈ vec(collect(reshape(vec(AA_sym), (dim^2, dim^2))) * collect(reshape(vec(BB_sym), (dim^2, dim^2)))) + @test dcontract(convert(Tensor, AA_sym), convert(Tensor, BB_sym)) ≈ dcontract(AA_sym, BB_sym) + + # 3 - 4 + @test dcontract(AA, A3) ≈ Tensor{3,dim}((i,j,k) -> sum(AA[i,j,m,n]*A3[m,n,k] for m in 1:dim, n in 1:dim)) + @test dcontract(A3, AA) ≈ Tensor{3,dim}((i,j,k) -> sum(A3[i,m,n]*AA[m,n,j,k] for m in 1:dim, n in 1:dim)) + + # 2 - 4 + @test (@inferred dcontract(AA, A))::Tensor{2, dim, T} ≈ reshape(collect(reshape(vec(AA), (dim^2, dim^2))) * collect(reshape(vec(A), (dim^2,))), dim, dim) + @test (@inferred dcontract(AA_sym, A))::SymmetricTensor{2, dim, T} ≈ reshape(collect(reshape(vec(AA_sym), (dim^2, dim^2))) * collect(reshape(vec(A), (dim^2,))), dim, 
dim) + @test (@inferred dcontract(AA, A_sym))::Tensor{2, dim, T} ≈ reshape(collect(reshape(vec(AA), (dim^2, dim^2))) * collect(reshape(vec(A_sym), (dim^2,))), dim, dim) + @test (@inferred dcontract(AA_sym, A_sym))::SymmetricTensor{2, dim, T} ≈ reshape(collect(reshape(vec(AA_sym), (dim^2, dim^2))) * collect(reshape(vec(A_sym), (dim^2,))), dim, dim) + @test (@inferred dcontract(A, AA))::Tensor{2, dim, T} ≈ reshape(collect(reshape(vec(AA), (dim^2, dim^2))') * collect(reshape(vec(A), (dim^2,))), dim, dim) + @test (@inferred dcontract(A_sym, AA))::Tensor{2, dim, T} ≈ reshape(collect(reshape(vec(AA), (dim^2, dim^2))') * collect(reshape(vec(A_sym), (dim^2,))), dim, dim) + @test (@inferred dcontract(A, AA_sym))::SymmetricTensor{2, dim, T} ≈ reshape(collect(reshape(vec(AA_sym), (dim^2, dim^2))') * collect(reshape(vec(A), (dim^2,))), dim, dim) + @test (@inferred dcontract(A_sym, AA_sym))::SymmetricTensor{2, dim, T} ≈ reshape(collect(reshape(vec(AA_sym), (dim^2, dim^2))') * collect(reshape(vec(A_sym), (dim^2,))), dim, dim) + @test dcontract(convert(Tensor, AA_sym), convert(Tensor, A_sym)) ≈ dcontract(AA_sym, A_sym) + + # 2 - 3 + @test dcontract(A, A3) ≈ Tensor{1,dim}((i) -> sum(A[m,n]*A3[m,n,i] for m in 1:dim, n in 1:dim)) + @test dcontract(A3, A) ≈ Tensor{1,dim}((i) -> sum(A3[i,m,n]*A[m,n] for m in 1:dim, n in 1:dim)) + + # 2 - 2 + @test (@inferred dcontract(A, B))::T ≈ sum(vec(A) .* vec(B)) + @test (@inferred dcontract(A_sym, B))::T ≈ sum(vec(A_sym) .* vec(B)) + @test (@inferred dcontract(A, B_sym))::T ≈ sum(vec(A) .* vec(B_sym)) + @test (@inferred dcontract(A_sym, B_sym))::T ≈ sum(vec(A_sym) .* vec(B_sym)) +end # of testsection + +@testsection "outer products (otimes, otimesu, otimesl)" begin + # binary otimes + @test (@inferred otimes(a, b))::Tensor{2, dim, T} ≈ Array(a) * Array(b)' + @test reshape(vec((@inferred otimes(A, B))::Tensor{4, dim, T}), dim^2, dim^2) ≈ vec(A) * vec(B)' + @test reshape(vec((@inferred otimes(A_sym, B))::Tensor{4, dim, T}), dim^2, dim^2) ≈ 
vec(A_sym) * vec(B)' + @test reshape(vec((@inferred otimes(A, B_sym))::Tensor{4, dim, T}), dim^2, dim^2) ≈ vec(A) * vec(B_sym)' + @test reshape(vec((@inferred otimes(A_sym, B_sym))::SymmetricTensor{4, dim, T}), dim^2, dim^2) ≈ vec(A_sym) * vec(B_sym)' + # unary otimes + @test (@inferred otimes(a))::SymmetricTensor{2, dim, T} ≈ otimes(a, a) + + # binary otimesu + @test (@inferred otimesu(A, B))::Tensor{4, dim, T} ≈ _permutedims(otimes(A, B), (1,3,2,4)) + @test (@inferred otimesu(A_sym, B))::Tensor{4, dim, T} ≈ _permutedims(otimes(A_sym, B), (1,3,2,4)) + @test (@inferred otimesu(A, B_sym))::Tensor{4, dim, T} ≈ _permutedims(otimes(A, B_sym), (1,3,2,4)) + @test (@inferred otimesu(A_sym, B_sym))::Tensor{4, dim, T} ≈ _permutedims(otimes(A_sym, B_sym), (1,3,2,4)) + + # binary otimesl + @test (@inferred otimesl(A, B))::Tensor{4, dim, T} ≈ _permutedims(otimes(A, B), (1,3,4,2)) + @test (@inferred otimesl(A_sym, B))::Tensor{4, dim, T} ≈ _permutedims(otimes(A_sym, B), (1,3,4,2)) + @test (@inferred otimesl(A, B_sym))::Tensor{4, dim, T} ≈ _permutedims(otimes(A, B_sym), (1,3,4,2)) + @test (@inferred otimesl(A_sym, B_sym))::Tensor{4, dim, T} ≈ _permutedims(otimes(A_sym, B_sym), (1,3,4,2)) +end # of testsection + +@testsection "dot products" begin + # 1 - 1 + @test (@inferred dot(a, b))::T ≈ sum(Array(a) .* Array(b)) + # 1 - 2 + @test (@inferred dot(A, b))::Vec{dim, T} ≈ Array(A) * Array(b) + @test (@inferred dot(A_sym, b))::Vec{dim, T} ≈ Array(A_sym) * Array(b) + @test (@inferred dot(a, B))::Vec{dim, T} ≈ Array(B)' * Array(a) + @test (@inferred dot(a, B_sym))::Vec{dim, T} ≈ Array(B_sym)' * Array(a) + # 2 - 2 + # binary + @test (@inferred dot(A, B))::Tensor{2, dim, T} ≈ Array(A) * Array(B) + @test (@inferred dot(A_sym, B))::Tensor{2, dim, T} ≈ Array(A_sym) * Array(B) + @test (@inferred dot(A, B_sym))::Tensor{2, dim, T} ≈ Array(A) * Array(B_sym) + @test (@inferred dot(A_sym, B_sym))::Tensor{2, dim, T} ≈ Array(A_sym) * Array(B_sym) + # unary + @test (@inferred 
dot(A_sym))::SymmetricTensor{2, dim, T} ≈ dot(A_sym, A_sym) + @test (@inferred tdot(A))::SymmetricTensor{2, dim, T} ≈ dot(transpose(A), A) + @test (@inferred tdot(A_sym))::SymmetricTensor{2, dim, T} ≈ dot(transpose(A_sym), A_sym) + @test (@inferred dott(A))::SymmetricTensor{2, dim, T} ≈ dot(A, transpose(A)) + @test (@inferred dott(A_sym))::SymmetricTensor{2, dim, T} ≈ dot(A_sym, transpose(A_sym)) + # 2 - 4 + @test (@inferred dot(AA, B))::Tensor{4, dim, T} ≈ reshape(collect(reshape(vec(AA), (dim^3, dim))) * collect(reshape(vec(B), (dim, dim))), (dim, dim, dim, dim)) + @test (@inferred dot(B, AA))::Tensor{4, dim, T} ≈ reshape(collect(reshape(vec(B), (dim, dim))) * collect(reshape(vec(AA), (dim, dim^3))), (dim, dim, dim, dim)) + @test (@inferred dot(AA_sym, B))::Tensor{4, dim, T} ≈ reshape(collect(reshape(vec(AA_sym), (dim^3, dim))) * collect(reshape(vec(B), (dim, dim))), (dim, dim, dim, dim)) + @test (@inferred dot(B, AA_sym))::Tensor{4, dim, T} ≈ reshape(collect(reshape(vec(B), (dim, dim))) * collect(reshape(vec(AA_sym), (dim, dim^3))), (dim, dim, dim, dim)) + @test (@inferred dot(AA, B_sym))::Tensor{4, dim, T} ≈ reshape(collect(reshape(vec(AA), (dim^3, dim))) * collect(reshape(vec(B_sym), (dim, dim))), (dim, dim, dim, dim)) + @test (@inferred dot(B_sym, AA))::Tensor{4, dim, T} ≈ reshape(collect(reshape(vec(B_sym), (dim, dim))) * collect(reshape(vec(AA), (dim, dim^3))), (dim, dim, dim, dim)) + @test (@inferred dot(AA_sym, B_sym))::Tensor{4, dim, T} ≈ reshape(collect(reshape(vec(AA_sym), (dim^3, dim))) * collect(reshape(vec(B_sym), (dim, dim))), (dim, dim, dim, dim)) + @test (@inferred dot(B_sym, AA_sym))::Tensor{4, dim, T} ≈ reshape(collect(reshape(vec(B_sym), (dim, dim))) * collect(reshape(vec(AA_sym), (dim, dim^3))), (dim, dim, dim, dim)) +end # of testsection + +@testsection "symmetric/skew-symmetric" begin + if dim == 1 # non-symmetric tensors are symmetric + @test (@inferred issymmetric(A)) + @test (@inferred issymmetric(AA)) + elseif dim != 1 + @test 
!(@inferred issymmetric(A)) + @test !(@inferred issymmetric(AA)) + @test !(@inferred ismajorsymmetric(AA)) + @test !(@inferred isminorsymmetric(AA)) + @test_throws InexactError convert(typeof(A_sym),A) + @test_throws InexactError convert(typeof(AA_sym),AA) + end + @test (@inferred issymmetric(A_sym)) + @test (@inferred issymmetric(AA_sym)) + @test (@inferred isminorsymmetric(AA_sym)) + @test (@inferred issymmetric(symmetric(A))) + @test (@inferred issymmetric(A + A')) + + @test (@inferred symmetric(A))::SymmetricTensor{2, dim, T} ≈ 0.5(A + A') + @test (@inferred symmetric(A_sym))::SymmetricTensor{2, dim, T} ≈ A_sym + @test convert(typeof(A_sym),convert(Tensor,symmetric(A))) ≈ symmetric(A) + + @test (@inferred symmetric(AA))::SymmetricTensor{4, dim, T} ≈ (@inferred minorsymmetric(AA))::SymmetricTensor{4, dim, T} + @test minorsymmetric(AA)[i,j,k,l] ≈ minorsymmetric(AA)[j,i,l,k] + @test issymmetric(convert(Tensor,minorsymmetric(AA))) + @test isminorsymmetric(convert(Tensor,minorsymmetric(AA))) + @test (@inferred symmetric(AA_sym))::SymmetricTensor{4, dim, T} ≈ (@inferred minorsymmetric(AA_sym))::SymmetricTensor{4, dim, T} + @test minorsymmetric(AA_sym)[i,j,k,l] ≈ minorsymmetric(AA_sym)[j,i,l,k] + @test minorsymmetric(AA_sym) ≈ AA_sym + @test issymmetric(convert(Tensor,minorsymmetric(AA_sym))) + @test isminorsymmetric(convert(Tensor,minorsymmetric(AA_sym))) + @test minorsymmetric(A⊗B) ≈ symA⊗symB + + @test convert(typeof(AA_sym),convert(Tensor,minorsymmetric(AA))) ≈ minorsymmetric(AA) + + @test ((@inferred majorsymmetric(AA))::Tensor{4, dim, T})[i,j,k,l] ≈ 0.5*(AA[i,j,k,l] + AA[k,l,i,j]) + @test majorsymmetric(AA)[i,j,k,l] ≈ majorsymmetric(AA)[k,l,i,j] + @test ismajorsymmetric(majorsymmetric(AA)) + @test ((@inferred majorsymmetric(AA_sym))::SymmetricTensor{4, dim, T})[i,j,k,l] ≈ 0.5*(AA_sym[i,j,k,l] + AA_sym[k,l,i,j]) + @test majorsymmetric(AA_sym)[i,j,k,l] ≈ majorsymmetric(AA_sym)[k,l,i,j] + @test ismajorsymmetric(majorsymmetric(AA_sym)) + + @test (@inferred 
skew(A))::Tensor{2, dim, T} ≈ 0.5(A - A') + @test (@inferred skew(A_sym))::Tensor{2, dim, T} ≈ zero(A_sym) + + # Identities + @test A ≈ symmetric(A) + skew(A) + @test skew(A) ≈ -skew(A)' + @test tr(skew(A)) ≈ 0.0 + @test tr(symmetric(A)) ≈ tr(A) +end # of testsection + +@testsection "transpose" begin + @test (@inferred transpose(A))::Tensor{2, dim, T} ≈ Array(A)' + @test transpose(transpose(A)) ≈ A + @test (@inferred transpose(A_sym))::SymmetricTensor{2, dim, T} ≈ A_sym ≈ Array(A_sym)' + @test transpose(transpose(A_sym)) ≈ A_sym + + @test (@inferred transpose(AA))::Tensor{4, dim, T} ≈ (@inferred minortranspose(AA))::Tensor{4, dim, T} + @test AA[i,j,k,l] ≈ minortranspose(AA)[j,i,l,k] + @test AA_sym[i,j,k,l] ≈ minortranspose(AA_sym)[j,i,l,k] + @test (@inferred transpose(AA_sym))::SymmetricTensor{4, dim, T} ≈ (@inferred minortranspose(AA_sym))::SymmetricTensor{4, dim, T} ≈ AA_sym + @test minortranspose(minortranspose(AA)) ≈ AA + @test minortranspose(minortranspose(AA_sym)) ≈ AA_sym + + @test majortranspose((@inferred majortranspose(AA))::Tensor{4, dim, T}) ≈ AA + @test majortranspose((@inferred majortranspose(AA_sym))::Tensor{4, dim, T}) ≈ AA_sym + @test AA[i,j,k,l] ≈ majortranspose(AA)[k,l,i,j] + @test AA_sym[i,j,k,l] ≈ majortranspose(AA_sym)[k,l,i,j] + + @test minortranspose(AA) ≈ _permutedims(AA,(2,1,4,3)) + @test minortranspose(AA_sym) ≈ _permutedims(AA_sym,(2,1,4,3)) + @test majortranspose(AA) ≈ _permutedims(AA,(3,4,1,2)) + @test majortranspose(AA_sym) ≈ _permutedims(AA_sym,(3,4,1,2)) +end # of testsection + +@testsection "cross product" begin + @test (@inferred a × a)::Vec{3, T} ≈ Vec{3, T}((0.0,0.0,0.0)) + @test a × b ≈ -b × a + if dim == 2 + ad = Vec{2, T}((1.0,0.0)) + ad2 = Vec{2, T}((0.0,1.0)) + @test (@inferred ad × ad2)::Vec{3, T} ≈ Vec{3, T}((0.0, 0.0, 1.0)) + end + if dim == 3 + ad = Vec{3, T}((1.0,0.0,0.0)) + ad2 = Vec{3, T}((0.0,1.0,0.0)) + @test (@inferred ad × ad2)::Vec{3, T} ≈ Vec{3, T}((0.0, 0.0, 1.0)) + end + if T == Float64 # mixed eltype + @test 
rand(Vec{dim,Float64}) × rand(Vec{dim,Float32}) isa Vec{3,Float64} + end +end # of testsection + +@testsection "special" begin + AAT = Tensor{4, dim, T}((i,j,k,l) -> AA_sym[i,l,k,j]) + @test AAT ⊡ (b ⊗ a) ≈ (@inferred dotdot(a, AA_sym, b))::Tensor{2, dim, T} +end # of testsection + +@testsection "rotation" begin + if dim == 3 + x = eᵢ(Vec{3, T}, 1) + y = eᵢ(Vec{3, T}, 2) + z = eᵢ(Vec{3, T}, 3) + + @test (@inferred rotate(z, z, rand(T)))::Vec{3, T} ≈ z + @test rotate(2*z, y, π/2) ≈ 2*x + @test rotate(3*z, y, π) ≈ -3*z + @test rotate(x+y+z, z, π/4) ≈ Vec{3}((0.0,√2,1.0)) + + @test rotate(a, b, 0) ≈ a ≈ rotation_tensor(b, 0) ⋅ a + @test rotate(a, b, π) ≈ rotate(a, b, -π) + @test rotate(a, b, π) ≈ rotation_tensor(b, π) ⋅ a rtol=1e-6 + @test rotate(a, b, π/2) ≈ rotate(a, -b, -π/2) + @test rotate(a, b, π/2) ≈ rotation_tensor(b, π/2) ⋅ a rtol=1e-6 + + @test rotate(A, x, π) ≈ rotate(A, x, -π) + @test rotate(rotate(rotate(A, x, π), y, π), z, π) ≈ A + @test rotate(A, a, 0) ≈ A + @test rotate(A, a, π/2) ≈ rotate(A, -a, -π/2) + A_sym_T = convert(Tensor{2,3}, A_sym) + @test rotate(A_sym, x, π)::SymmetricTensor ≈ rotate(A_sym, x, -π)::SymmetricTensor ≈ rotate(A_sym_T, x, -π) + @test rotate(rotate(rotate(A_sym, x, π), y, π), z, π)::SymmetricTensor ≈ A_sym + @test rotate(A_sym, a, 0) ≈ A_sym + @test rotate(A_sym, a, π/2) ≈ rotate(A_sym, -a, -π/2) + @test rotate(A_sym, π/4, π/4, π/4) ≈ rotate(A_sym, -3π/4, 3π/4, -3π/4) + @test rotate(A, π/4, π/4, π/4) ≈ rotate(A, -3π/4, 3π/4, -3π/4) + @test rotate(one(A), π/4, π/4, π/4) ≈ rotate(one(A), -3π/4, 3π/4, -3π/4) ≈ one(A) + + @test rotate(AA, x, π)::Tensor ≈ rotate(AA, x, -π) + @test rotate(rotate(rotate(AA, x, π), y, π), z, π) ≈ AA + @test rotate(AA, a, 0) ≈ AA + @test rotate(AA, a, π/2) ≈ rotate(AA, -a, -π/2) + AA_sym_T = convert(Tensor{4,3}, AA_sym) + @test rotate(AA_sym, x, π)::SymmetricTensor ≈ rotate(AA_sym, x, -π) ≈ rotate(AA_sym_T, x, π) + @test rotate(rotate(rotate(AA_sym, x, π), y, π), z, π) ≈ AA_sym + @test rotate(AA_sym, a, 0) 
≈ AA_sym + @test rotate(AA_sym, a, π/2) ≈ rotate(AA_sym, -a, -π/2) + + v1, v2, v3, v4, axis = [rand(Vec{3,T}) for _ in 1:5] + α = rand(T) * π + R = rotation_tensor(axis, α) + v1v2v3v4 = (v1 ⊗ v2) ⊗ (v3 ⊗ v4) + Rv1v2v3v4 = ((R ⋅ v1) ⊗ (R ⋅ v2)) ⊗ ((R ⋅ v3) ⊗ (R ⋅ v4)) + @test rotate(v1v2v3v4, axis, α) ≈ Rv1v2v3v4 + v1v1v2v2 = otimes(v1) ⊗ otimes(v2) + Rv1v1v2v2 = otimes(R ⋅ v1) ⊗ otimes(R ⋅ v2) + @test rotate(v1v1v2v2, axis, α) ≈ Rv1v1v2v2 + end + if dim == 2 + θ = T(pi/2) + zT = zero(T) + z = Vec{3}((zT, zT, one(T))) + r = 1:2 + R = rotation_tensor(θ) + R3 = rotation_tensor(z, θ) + @test R ≈ R3[r, r] + a3 = Vec{3}((a[1], a[2], zT)) + @test rotate(a, θ) ≈ R ⋅ a ≈ rotate(a3, z, θ)[r] + A3 = Tensor{2,3}((i,j) -> i < 3 && j < 3 ? A[i, j] : zT) + @test rotate(A, θ) ≈ rotate(A3, z, θ)[r, r] + A3s = SymmetricTensor{2,3}((i,j) -> i < 3 && j < 3 ? A_sym[i, j] : zT) + @test rotate(A_sym, θ) ≈ rotate(A3s, z, θ)[r, r] + AA3 = Tensor{4,3}((i,j,k,l) -> i < 3 && j < 3 && k < 3 && l < 3 ? AA[i, j, k, l] : zT) + @test rotate(AA, θ) ≈ rotate(AA3, z, θ)[r, r, r, r] + AA3s = SymmetricTensor{4,3}((i,j,k,l) -> i < 3 && j < 3 && k < 3 && l < 3 ? AA_sym[i, j, k, l] : zT) + @test rotate(AA_sym, θ) ≈ rotate(AA3s, z, θ)[r, r, r, r] + end +end + +@testsection "tovoigt/fromvoigt" begin + # https://classes.engineering.wustl.edu/2009/spring/mase5513/abaqus/docs/v6.6/books/usb/default.htm?startat=pt01ch01s02aus02.html + abaqus = dim == 1 ? fill(1, 1, 1) : dim == 2 ? 
[1 3; 0 2] : [1 4 6; 0 2 5; 0 0 3] + T2 = T(2) + @test (@inferred tovoigt(AA)) * (@inferred tovoigt(A)) ≈ tovoigt(AA ⊡ A) + @test (@inferred tovoigt(AA_sym)) * (@inferred tovoigt(A_sym, offdiagscale=T2)) ≈ tovoigt(AA_sym ⊡ A_sym) + + nc(x::SecondOrderTensor) = length(x.data) + nc(x::FourthOrderTensor) = Int(sqrt(length(x.data))) + x = zeros(T, nc(A) + 1); tovoigt!(x, A; offset=1) + @test x[2:end] == tovoigt(A) + x = zeros(T, nc(A_sym) + 1); tovoigt!(x, A_sym; offset=1) + @test x[2:end] == tovoigt(A_sym) + x = zeros(T, nc(AA) + 1, nc(AA) + 1); tovoigt!(x, AA; offset_i=1, offset_j=1) + @test x[2:end, 2:end] == tovoigt(AA) + x = zeros(T, nc(AA_sym) + 1, nc(AA_sym) + 1); tovoigt!(x, AA_sym; offset_i=1, offset_j=1) + @test x[2:end, 2:end] == tovoigt(AA_sym) + + x = zeros(T, nc(A) + 1); tomandel!(x, A; offset=1) + @test x[2:end] == tomandel(A) + x = zeros(T, nc(A_sym) + 1); tomandel!(x, A_sym; offset=1) + @test x[2:end] == tomandel(A_sym) + x = zeros(T, nc(AA) + 1, nc(AA) + 1); tomandel!(x, AA; offset_i=1, offset_j=1) + @test x[2:end, 2:end] == tomandel(AA) + x = zeros(T, nc(AA_sym) + 1, nc(AA_sym) + 1); tomandel!(x, AA_sym; offset_i=1, offset_j=1) + @test x[2:end, 2:end] == tomandel(AA_sym) + + @test (@inferred fromvoigt(Tensor{2,dim}, tovoigt(A))) ≈ A + @test (@inferred fromvoigt(Tensor{4,dim}, tovoigt(AA))) ≈ AA + @test (@inferred fromvoigt(SymmetricTensor{2,dim}, tovoigt(A_sym, offdiagscale=T2), offdiagscale=T2)) ≈ A_sym + @test (@inferred fromvoigt(SymmetricTensor{4,dim}, tovoigt(AA_sym, offdiagscale=T2), offdiagscale=T2)) ≈ AA_sym + + @test (@inferred fromvoigt(SymmetricTensor{2,dim}, tovoigt(A_sym; order=abaqus); order=abaqus)) ≈ A_sym + @test (@inferred fromvoigt(SymmetricTensor{4,dim}, tovoigt(AA_sym; order=abaqus); order=abaqus)) ≈ AA_sym + + @test (@inferred tomandel(AA_sym)) * (@inferred tomandel(A_sym)) ≈ tomandel(AA_sym ⊡ A_sym) + @test (@inferred frommandel(SymmetricTensor{2,dim}, tomandel(A_sym))) ≈ A_sym + @test (@inferred 
frommandel(SymmetricTensor{4,dim}, tomandel(AA_sym))) ≈ AA_sym + @test (@inferred tomandel(AA)) * (@inferred tomandel(A)) ≈ tomandel(AA ⊡ A) + @test (@inferred frommandel(Tensor{2,dim}, tomandel(A))) ≈ A + @test (@inferred frommandel(Tensor{4,dim}, tomandel(AA))) ≈ AA + + if T==Float64 + # Check that Boolean values are supported for fromvoigt + num_components = Int((dim^2+dim)/2) + @test isa(fromvoigt(SymmetricTensor{2,dim}, rand(num_components) .> 0.5), SymmetricTensor{2,dim,Bool}) + @test isa(fromvoigt(SymmetricTensor{4,dim}, rand(num_components,num_components) .> 0.5), SymmetricTensor{4,dim,Bool}) + + # Check that different types in the vector and tensor is supported via conversion + @test tovoigt!(zeros(Float32, nc(A)), A)::Vector{Float32} ≈ tovoigt(A) + @test tovoigt!(zeros(Float32, nc(A_sym)), A_sym)::Vector{Float32} ≈ tovoigt(A_sym) + @test tovoigt!(zeros(Float32, nc(AA), nc(AA)), AA)::Matrix{Float32} ≈ tovoigt(AA) + @test tovoigt!(zeros(Float32, nc(AA_sym), nc(AA_sym)), AA_sym)::Matrix{Float32} ≈ tovoigt(AA_sym) + end + + # Static voigt/mandel that use default order + s2 = [rand(SymmetricTensor{2,dim}) for dim in 1:3] + s4 = [rand(SymmetricTensor{4,dim}) for dim in 1:3] + t2 = [rand(Tensor{2,dim}) for dim in 1:3] + t4 = [rand(Tensor{4,dim}) for dim in 1:3] + for a in (s2, s4, t2, t4) + for b in a + @test tovoigt(SArray, b) == tovoigt(b) + @test tomandel(SArray, b) ≈ tomandel(b) + @test fromvoigt(Tensors.get_base(typeof(b)), tovoigt(SArray, b)) ≈ b + @test frommandel(Tensors.get_base(typeof(b)), tomandel(SArray, b)) ≈ b + end + end +end +end # of testsection +end # of testsection